Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r--  drivers/net/ethernet/3com/3c509.c | 2
-rw-r--r--  drivers/net/ethernet/3com/3c515.c | 2
-rw-r--r--  drivers/net/ethernet/3com/3c574_cs.c | 2
-rw-r--r--  drivers/net/ethernet/3com/3c589_cs.c | 2
-rw-r--r--  drivers/net/ethernet/3com/3c59x.c | 2
-rw-r--r--  drivers/net/ethernet/8390/ax88796.c | 43
-rw-r--r--  drivers/net/ethernet/8390/axnet_cs.c | 6
-rw-r--r--  drivers/net/ethernet/8390/lib8390.c | 4
-rw-r--r--  drivers/net/ethernet/adaptec/starfire.c | 2
-rw-r--r--  drivers/net/ethernet/adi/bfin_mac.c | 50
-rw-r--r--  drivers/net/ethernet/adi/bfin_mac.h | 1
-rw-r--r--  drivers/net/ethernet/aeroflex/greth.c | 2
-rw-r--r--  drivers/net/ethernet/agere/et131x.c | 66
-rw-r--r--  drivers/net/ethernet/allwinner/sun4i-emac.c | 60
-rw-r--r--  drivers/net/ethernet/altera/altera_tse.h | 1
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_ethtool.c | 26
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_main.c | 17
-rw-r--r--  drivers/net/ethernet/amd/7990.c | 12
-rw-r--r--  drivers/net/ethernet/amd/a2065.c | 9
-rw-r--r--  drivers/net/ethernet/amd/atarilance.c | 2
-rw-r--r--  drivers/net/ethernet/amd/au1000_eth.c | 63
-rw-r--r--  drivers/net/ethernet/amd/au1000_eth.h | 1
-rw-r--r--  drivers/net/ethernet/amd/declance.c | 2
-rw-r--r--  drivers/net/ethernet/amd/lance.c | 2
-rw-r--r--  drivers/net/ethernet/amd/ni65.c | 4
-rw-r--r--  drivers/net/ethernet/amd/nmclan_cs.c | 2
-rw-r--r--  drivers/net/ethernet/amd/pcnet32.c | 4
-rw-r--r--  drivers/net/ethernet/amd/sunlance.c | 2
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 2
-rw-r--r--  drivers/net/ethernet/apm/xgene/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_cle.c | 2
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_cle.h | 2
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c | 22
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 257
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_hw.h | 11
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 230
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.h | 35
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c | 239
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h | 8
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c | 66
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h | 3
-rw-r--r--  drivers/net/ethernet/arc/emac.h | 1
-rw-r--r--  drivers/net/ethernet/arc/emac_main.c | 86
-rw-r--r--  drivers/net/ethernet/arc/emac_mdio.c | 2
-rw-r--r--  drivers/net/ethernet/atheros/alx/main.c | 31
-rw-r--r--  drivers/net/ethernet/atheros/alx/reg.h | 1
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c.h | 3
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 11
-rw-r--r--  drivers/net/ethernet/atheros/atl1e/atl1e.h | 1
-rw-r--r--  drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 12
-rw-r--r--  drivers/net/ethernet/aurora/nb8800.c | 74
-rw-r--r--  drivers/net/ethernet/aurora/nb8800.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/Kconfig | 44
-rw-r--r--  drivers/net/ethernet/broadcom/Makefile | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bcm63xx_enet.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 59
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.h | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c | 266
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac-bcma.c | 315
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac-platform.c | 185
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c | 818
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.h | 117
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c | 12
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 286
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 1139
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h | 123
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | 547
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h | 4
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 520
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | 47
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h | 3
-rw-r--r--  drivers/net/ethernet/broadcom/cnic.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 54
-rw-r--r--  drivers/net/ethernet/broadcom/sb1250-mac.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 28
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad_debugfs.c | 6
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad_ethtool.c | 57
-rw-r--r--  drivers/net/ethernet/cadence/macb.c | 229
-rw-r--r--  drivers/net/ethernet/cadence/macb.h | 3
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn66xx_device.c | 61
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn66xx_device.h | 5
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn68xx_device.c | 13
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn68xx_device.h | 1
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h | 1
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_ethtool.c | 1009
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_main.c | 1420
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/liquidio_common.h | 408
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_config.h | 16
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_console.c | 50
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_device.c | 264
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_device.h | 52
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_droq.c | 213
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_droq.h | 41
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_iq.h | 85
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_main.h | 25
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c | 24
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_network.h | 252
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_nic.c | 67
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_nic.h | 154
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/request_manager.c | 313
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/response_manager.c | 30
-rw-r--r--  drivers/net/ethernet/cavium/octeon/octeon_mgmt.c | 108
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic.h | 1
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic_main.c | 27
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic_reg.h | 1
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c | 5
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c | 22
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 66
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 95
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/Kconfig | 16
-rw-r--r--  drivers/net/ethernet/chelsio/Makefile | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/sge.c | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/Makefile | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 45
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 100
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 375
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 244
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 339
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.h | 7
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 35
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h | 12
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/adapter.h | 12
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 401
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h | 45
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 139
-rw-r--r--  drivers/net/ethernet/chelsio/libcxgb/Makefile | 3
-rw-r--r--  drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c (renamed from drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.c) | 46
-rw-r--r--  drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h (renamed from drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.h) | 38
-rw-r--r--  drivers/net/ethernet/cirrus/cs89x0.c | 12
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_ethtool.c | 28
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 5
-rw-r--r--  drivers/net/ethernet/davicom/dm9000.c | 23
-rw-r--r--  drivers/net/ethernet/dec/tulip/de4x5.c | 13
-rw-r--r--  drivers/net/ethernet/dec/tulip/dmfe.c | 45
-rw-r--r--  drivers/net/ethernet/dec/tulip/pnic.c | 6
-rw-r--r--  drivers/net/ethernet/dec/tulip/tulip_core.c | 2
-rw-r--r--  drivers/net/ethernet/dec/tulip/uli526x.c | 4
-rw-r--r--  drivers/net/ethernet/dec/tulip/winbond-840.c | 2
-rw-r--r--  drivers/net/ethernet/dlink/dl2k.c | 2
-rw-r--r--  drivers/net/ethernet/dlink/sundance.c | 2
-rw-r--r--  drivers/net/ethernet/dnet.c | 48
-rw-r--r--  drivers/net/ethernet/dnet.h | 1
-rw-r--r--  drivers/net/ethernet/emulex/benet/Kconfig | 8
-rw-r--r--  drivers/net/ethernet/emulex/benet/be.h | 58
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 160
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h | 16
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_ethtool.c | 66
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 344
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_roce.c | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_roce.h | 2
-rw-r--r--  drivers/net/ethernet/ethoc.c | 63
-rw-r--r--  drivers/net/ethernet/ezchip/nps_enet.c | 30
-rw-r--r--  drivers/net/ethernet/faraday/ftgmac100.c | 297
-rw-r--r--  drivers/net/ethernet/fealnx.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/fec.h | 5
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 148
-rw-r--r--  drivers/net/ethernet/freescale/fec_mpc52xx.c | 57
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_muram.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_muram.h | 4
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 45
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/fs_enet.h | 1
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mac-fcc.c | 4
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mac-fec.c | 6
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/mac-scc.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c | 69
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.h | 4
-rw-r--r--  drivers/net/ethernet/freescale/gianfar_ethtool.c | 52
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth_ethtool.c | 17
-rw-r--r--  drivers/net/ethernet/fujitsu/fmvj18x_cs.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/Kconfig | 14
-rw-r--r--  drivers/net/ethernet/hisilicon/Makefile | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hisi_femac.c | 1007
-rw-r--r--  drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 46
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hnae.c | 19
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hnae.h | 20
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c | 153
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c | 8
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 453
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h | 17
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 483
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h | 66
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c | 455
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h | 7
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | 83
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h | 1
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | 194
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | 65
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c | 10
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c | 175
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.h | 5
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 74
-rw-r--r--  drivers/net/ethernet/hisilicon/hns_mdio.c | 204
-rw-r--r--  drivers/net/ethernet/hp/hp100.c | 2
-rw-r--r--  drivers/net/ethernet/i825xx/82596.c | 2
-rw-r--r--  drivers/net/ethernet/i825xx/lib82596.c | 2
-rw-r--r--  drivers/net/ethernet/i825xx/sun3_82586.c | 4
-rw-r--r--  drivers/net/ethernet/ibm/ehea/ehea_main.c | 9
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.c | 38
-rw-r--r--  drivers/net/ethernet/ibm/emac/phy.c | 26
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 478
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.h | 6
-rw-r--r--  drivers/net/ethernet/intel/Kconfig | 123
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000.h | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c | 8
-rw-r--r--  drivers/net/ethernet/intel/e1000e/80003es2lan.c | 12
-rw-r--r--  drivers/net/ethernet/intel/e1000e/82571.c | 36
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h | 112
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c | 61
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 47
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.h | 8
-rw-r--r--  drivers/net/ethernet/intel/e1000e/mac.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 240
-rw-r--r--  drivers/net/ethernet/intel/e1000e/nvm.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/phy.c | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000e/phy.h | 10
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ptp.c | 2
-rw-r--r--  drivers/net/ethernet/intel/fm10k/Makefile | 7
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k.h | 55
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_common.c | 10
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_common.h | 4
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c | 4
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c | 4
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c | 355
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_iov.c | 8
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_main.c | 169
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_mbx.c | 4
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_mbx.h | 6
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 90
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 529
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 179
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pf.h | 21
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_ptp.c | 462
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_tlv.c | 44
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_tlv.h | 4
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_type.h | 30
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_vf.c | 75
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_vf.h | 14
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 48
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.c | 37
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 82
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_client.c | 45
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_client.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_common.c | 227
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 52
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_devids.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 396
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_fcoe.c | 14
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_hmc.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 1294
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_nvm.c | 86
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_prototype.h | 22
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ptp.c | 7
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 1087
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h | 114
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h | 42
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl.h | 45
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 513
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 11
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 70
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_common.c | 1
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_devids.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 1042
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 114
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_type.h | 51
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h | 45
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf.h | 49
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c | 329
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c | 507
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c | 153
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.c | 8
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_82575.h | 30
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_defines.h | 108
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mac.c | 10
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_mbx.c | 4
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_nvm.c | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_phy.h | 6
-rw-r--r--  drivers/net/ethernet/intel/igb/igb.h | 45
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ethtool.c | 21
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 243
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_ptp.c | 156
-rw-r--r--  drivers/net/ethernet/intel/igbvf/defines.h | 2
-rw-r--r--  drivers/net/ethernet/intel/igbvf/ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/intel/igbvf/igbvf.h | 4
-rw-r--r--  drivers/net/ethernet/intel/igbvf/netdev.c | 196
-rw-r--r--  drivers/net/ethernet/intel/igbvf/vf.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe.h | 97
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | 18
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 32
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 250
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 9
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c | 10
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c | 2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 62
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 1179
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c | 46
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_model.h | 18
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h | 8
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 10
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 118
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 303
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 35
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 693
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/defines.h | 30
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ethtool.c | 230
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 39
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 318
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/mbx.c | 15
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.c | 308
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/vf.h | 7
-rw-r--r--  drivers/net/ethernet/jme.c | 2
-rw-r--r--  drivers/net/ethernet/korina.c | 8
-rw-r--r--  drivers/net/ethernet/lantiq_etop.c | 41
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 5
-rw-r--r--  drivers/net/ethernet/marvell/mvneta_bm.c | 1
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2.c | 50
-rw-r--r--  drivers/net/ethernet/marvell/pxa168_eth.c | 88
-rw-r--r--  drivers/net/ethernet/marvell/sky2.c | 2
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c | 503
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.h | 45
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/Kconfig | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/alloc.c | 93
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_cq.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c | 280
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 90
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 399
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_port.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_resources.c | 31
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 140
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 317
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c | 41
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/intf.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mcg.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 77
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mr.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/pd.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/port.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 524
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cq.c | 59
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 758
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 742
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_clock.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_common.c | 161
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 42
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 957
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 1087
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c | 586
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 1989
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 431
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 788
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c | 335
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 358
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 262
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 118
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 66
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 1082
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 125
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 646
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 202
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 546
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 44
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c | 318
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/health.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 158
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 63
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/port.c | 186
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/qp.c | 70
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/rl.c | 209
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sriov.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/srq.c | 265
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/transobj.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vport.c | 47
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vxlan.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vxlan.h | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/wq.c | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/Kconfig | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/Makefile | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/cmd.h | 75
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 764
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.h | 99
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci.c | 78
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/port.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h | 2079
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 3021
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 405
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 997
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c | 486
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c | 91
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 1863
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 516
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/switchx2.c | 51
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/trap.h | 9
-rw-r--r--  drivers/net/ethernet/micrel/ks8695net.c | 7
-rw-r--r--  drivers/net/ethernet/micrel/ksz884x.c | 4
-rw-r--r--  drivers/net/ethernet/microchip/enc28j60.c | 41
-rw-r--r--  drivers/net/ethernet/microchip/encx24j600.c | 4
-rw-r--r--  drivers/net/ethernet/moxa/moxart_ether.c | 2
-rw-r--r--  drivers/net/ethernet/natsemi/natsemi.c | 2
-rw-r--r--  drivers/net/ethernet/natsemi/sonic.c | 2
-rw-r--r--  drivers/net/ethernet/neterion/s2io.c | 11
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net.h | 24
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 1143
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h | 10
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c | 24
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 32
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c | 11
-rw-r--r--  drivers/net/ethernet/netx-eth.c | 12
-rw-r--r--  drivers/net/ethernet/nuvoton/w90p910_ether.c | 4
-rw-r--r--  drivers/net/ethernet/nxp/lpc_eth.c | 65
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h | 2
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c | 12
-rw-r--r--  drivers/net/ethernet/packetengines/hamachi.c | 2
-rw-r--r--  drivers/net/ethernet/packetengines/yellowfin.c | 2
-rw-r--r--  drivers/net/ethernet/pasemi/pasemi_mac.c | 28
-rw-r--r--  drivers/net/ethernet/pasemi/pasemi_mac.h | 1
-rw-r--r--  drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c | 30
-rw-r--r--  drivers/net/ethernet/qlogic/Kconfig | 21
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/Makefile | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed.h | 112
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.c | 1517
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_cxt.h | 37
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 2313
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dcbx.h | 108
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev.c | 1427
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 88
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hsi.h | 11004
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hw.c | 122
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_hw.h | 22
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c | 325
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_init_ops.c | 13
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.c | 235
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.h | 39
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_l2.c | 831
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_l2.h | 239
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c | 382
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.c | 417
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_mcp.h | 83
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_reg_addr.h | 94
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_selftest.c | 76
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_selftest.h | 40
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sp.h | 62
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sp_commands.c | 348
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_spq.c | 97
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sriov.c | 3859
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sriov.h | 395
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_vf.c | 1141
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_vf.h | 999
-rw-r--r--  drivers/net/ethernet/qlogic/qede/Makefile | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede.h | 54
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_dcbnl.c | 348
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ethtool.c | 732
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_main.c | 698
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h | 1
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 40
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h | 9
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 95
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_main.c | 6
-rw-r--r--  drivers/net/ethernet/qualcomm/qca_spi.c | 4
-rw-r--r--  drivers/net/ethernet/rdc/r6040.c | 91
-rw-r--r--  drivers/net/ethernet/realtek/8139cp.c | 2
-rw-r--r--  drivers/net/ethernet/realtek/8139too.c | 12
-rw-r--r--  drivers/net/ethernet/realtek/atp.c | 2
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 81
-rw-r--r--  drivers/net/ethernet/renesas/ravb.h | 206
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 299
-rw-r--r--  drivers/net/ethernet/renesas/ravb_ptp.c | 26
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 48
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.h | 2
-rw-r--r--  drivers/net/ethernet/rocker/rocker_main.c | 3
-rw-r--r--  drivers/net/ethernet/rocker/rocker_ofdpa.c | 4
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h | 1
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c | 31
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 32
-rw-r--r--  drivers/net/ethernet/seeq/sgiseeq.c | 4
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 763
-rw-r--r--  drivers/net/ethernet/sfc/ef10_sriov.c | 44
-rw-r--r--  drivers/net/ethernet/sfc/ef10_sriov.h | 3
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 98
-rw-r--r--  drivers/net/ethernet/sfc/efx.h | 9
-rw-r--r--  drivers/net/ethernet/sfc/farch.c | 3
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_pcol.h | 1327
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_port.c | 7
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 31
-rw-r--r--  drivers/net/ethernet/sfc/nic.h | 5
-rw-r--r--  drivers/net/ethernet/sfc/rx.c | 102
-rw-r--r--  drivers/net/ethernet/sgi/meth.c | 4
-rw-r--r--  drivers/net/ethernet/sis/sis900.c | 2
-rw-r--r--  drivers/net/ethernet/smsc/epic100.c | 2
-rw-r--r--  drivers/net/ethernet/smsc/smc911x.c | 6
-rw-r--r--  drivers/net/ethernet/smsc/smc9194.c | 4
-rw-r--r--  drivers/net/ethernet/smsc/smc91c92_cs.c | 4
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.c | 24
-rw-r--r--  drivers/net/ethernet/smsc/smc91x.h | 67
-rw-r--r--  drivers/net/ethernet/smsc/smsc911x.c | 289
-rw-r--r--  drivers/net/ethernet/smsc/smsc9420.c | 60
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Kconfig | 14
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Makefile | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c | 274
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h | 36
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h | 83
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 165
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 260
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000.h | 86
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 156
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c | 35
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c | 5
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 268
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 443
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 389
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h | 129
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c | 354
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h | 202
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c | 225
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 21
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/mmc.h | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/mmc_core.c | 349
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/norm_desc.c | 21
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h | 14
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 67
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 710
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 104
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 24
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h | 159
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 30
-rw-r--r--  drivers/net/ethernet/sun/niu.c | 2
-rw-r--r--  drivers/net/ethernet/sun/sungem.c | 2
-rw-r--r--  drivers/net/ethernet/synopsys/dwc_eth_qos.c | 132
-rw-r--r--  drivers/net/ethernet/tehuti/tehuti.c | 12
-rw-r--r--  drivers/net/ethernet/ti/Kconfig | 3
-rw-r--r--  drivers/net/ethernet/ti/cpmac.c | 70
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 178
-rw-r--r--  drivers/net/ethernet/ti/cpsw.h | 1
-rw-r--r--  drivers/net/ethernet/ti/davinci_cpdma.c | 261
-rw-r--r--  drivers/net/ethernet/ti/davinci_cpdma.h | 3
-rw-r--r--  drivers/net/ethernet/ti/davinci_emac.c | 189
-rw-r--r--  drivers/net/ethernet/ti/davinci_mdio.c | 169
-rw-r--r--  drivers/net/ethernet/ti/netcp_core.c | 4
-rw-r--r--  drivers/net/ethernet/ti/tlan.c | 3
-rw-r--r--  drivers/net/ethernet/tile/tilegx.c | 6
-rw-r--r--  drivers/net/ethernet/tile/tilepro.c | 8
-rw-r--r--  drivers/net/ethernet/toshiba/ps3_gelic_wireless.c | 4
-rw-r--r--  drivers/net/ethernet/toshiba/spider_net.c | 2
-rw-r--r--  drivers/net/ethernet/toshiba/tc35815.c | 65
-rw-r--r--  drivers/net/ethernet/tundra/tsi108_eth.c | 5
-rw-r--r--  drivers/net/ethernet/via/via-rhine.c | 2
-rw-r--r--  drivers/net/ethernet/wiznet/Kconfig | 14
-rw-r--r--  drivers/net/ethernet/wiznet/Makefile | 1
-rw-r--r--  drivers/net/ethernet/wiznet/w5100-spi.c | 466
-rw-r--r--  drivers/net/ethernet/wiznet/w5100.c | 1075
-rw-r--r--  drivers/net/ethernet/wiznet/w5100.h | 37
-rw-r--r--  drivers/net/ethernet/wiznet/w5300.c | 2
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac.h | 1
-rw-r--r--  drivers/net/ethernet/xilinx/ll_temac_main.c | 49
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet.h | 2
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 84
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_emaclite.c | 12
-rw-r--r--  drivers/net/ethernet/xircom/xirc2ps_cs.c | 6
-rw-r--r--  drivers/net/ethernet/xscale/ixp4xx_eth.c | 46
593 files changed, 74633 insertions(+), 25583 deletions(-)
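
The single most common change in this series is mechanical: every open-coded "dev->trans_start = jiffies;" write becomes a call to the netif_trans_update() helper. For context (not part of this diff), the helper as it appeared in include/linux/netdevice.h around this kernel release was, condensed:

static inline void netif_trans_update(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	if (txq->trans_start != jiffies)
		txq->trans_start = jiffies;	/* avoid needlessly dirtying the cacheline */
}

It stamps queue 0's trans_start, which is what the tx-timeout watchdog actually consults, rather than the legacy net_device field the drivers below used to write directly.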
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 7677c745fb30..91ada52f776b 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -699,7 +699,7 @@ el3_tx_timeout (struct net_device *dev)
dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS),
inw(ioaddr + TX_FREE));
dev->stats.tx_errors++;
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
/* Issue TX_RESET and TX_START commands. */
outw(TxReset, ioaddr + EL3_CMD);
outw(TxEnable, ioaddr + EL3_CMD);
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index 942fb0d5aace..b26e038b4a0e 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -992,7 +992,7 @@ static void corkscrew_timeout(struct net_device *dev)
if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress))
break;
outw(TxEnable, ioaddr + EL3_CMD);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
dev->stats.tx_errors++;
dev->stats.tx_dropped++;
netif_wake_queue(dev);
diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c
index b9948f00c5e9..b88afd759307 100644
--- a/drivers/net/ethernet/3com/3c574_cs.c
+++ b/drivers/net/ethernet/3com/3c574_cs.c
@@ -700,7 +700,7 @@ static void el3_tx_timeout(struct net_device *dev)
netdev_notice(dev, "Transmit timed out!\n");
dump_status(dev);
dev->stats.tx_errors++;
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
/* Issue TX_RESET and TX_START commands. */
tc574_wait_for_completion(dev, TxReset);
outw(TxEnable, ioaddr + EL3_CMD);
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index c5a320507556..71396e4b87e3 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -534,7 +534,7 @@ static void el3_tx_timeout(struct net_device *dev)
netdev_warn(dev, "Transmit timed out!\n");
dump_status(dev);
dev->stats.tx_errors++;
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
/* Issue TX_RESET and TX_START commands. */
tc589_wait_for_completion(dev, TxReset);
outw(TxEnable, ioaddr + EL3_CMD);
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index d81fceddbe0e..25c55ab05c7d 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1944,7 +1944,7 @@ static void vortex_tx_timeout(struct net_device *dev)
}
/* Issue Tx Enable */
iowrite16(TxEnable, ioaddr + EL3_CMD);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
}
/*
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index c89b9aeeceb6..39ca9350d1b2 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -84,7 +84,6 @@ static u32 ax_msg_enable;
struct ax_device {
struct mii_bus *mii_bus;
struct mdiobb_ctrl bb_ctrl;
- struct phy_device *phy_dev;
void __iomem *addr_memr;
u8 reg_memr;
int link;
@@ -320,7 +319,7 @@ static void ax_block_output(struct net_device *dev, int count,
static void ax_handle_link_change(struct net_device *dev)
{
struct ax_device *ax = to_ax_dev(dev);
- struct phy_device *phy_dev = ax->phy_dev;
+ struct phy_device *phy_dev = dev->phydev;
int status_change = 0;
if (phy_dev->link && ((ax->speed != phy_dev->speed) ||
@@ -369,8 +368,6 @@ static int ax_mii_probe(struct net_device *dev)
phy_dev->supported &= PHY_BASIC_FEATURES;
phy_dev->advertising = phy_dev->supported;
- ax->phy_dev = phy_dev;
-
netdev_info(dev, "PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
phy_dev->drv->name, phydev_name(phy_dev), phy_dev->irq);
@@ -410,7 +407,7 @@ static int ax_open(struct net_device *dev)
ret = ax_mii_probe(dev);
if (ret)
goto failed_mii_probe;
- phy_start(ax->phy_dev);
+ phy_start(dev->phydev);
ret = ax_ei_open(dev);
if (ret)
@@ -421,7 +418,7 @@ static int ax_open(struct net_device *dev)
return 0;
failed_ax_ei_open:
- phy_disconnect(ax->phy_dev);
+ phy_disconnect(dev->phydev);
failed_mii_probe:
ax_phy_switch(dev, 0);
free_irq(dev->irq, dev);
@@ -442,7 +439,7 @@ static int ax_close(struct net_device *dev)
/* turn the phy off */
ax_phy_switch(dev, 0);
- phy_disconnect(ax->phy_dev);
+ phy_disconnect(dev->phydev);
free_irq(dev->irq, dev);
return 0;
@@ -450,8 +447,7 @@ static int ax_close(struct net_device *dev)
static int ax_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
- struct ax_device *ax = to_ax_dev(dev);
- struct phy_device *phy_dev = ax->phy_dev;
+ struct phy_device *phy_dev = dev->phydev;
if (!netif_running(dev))
return -EINVAL;
@@ -474,28 +470,6 @@ static void ax_get_drvinfo(struct net_device *dev,
strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
}
-static int ax_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct ax_device *ax = to_ax_dev(dev);
- struct phy_device *phy_dev = ax->phy_dev;
-
- if (!phy_dev)
- return -ENODEV;
-
- return phy_ethtool_gset(phy_dev, cmd);
-}
-
-static int ax_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct ax_device *ax = to_ax_dev(dev);
- struct phy_device *phy_dev = ax->phy_dev;
-
- if (!phy_dev)
- return -ENODEV;
-
- return phy_ethtool_sset(phy_dev, cmd);
-}
-
static u32 ax_get_msglevel(struct net_device *dev)
{
struct ei_device *ei_local = netdev_priv(dev);
@@ -512,12 +486,12 @@ static void ax_set_msglevel(struct net_device *dev, u32 v)
static const struct ethtool_ops ax_ethtool_ops = {
.get_drvinfo = ax_get_drvinfo,
- .get_settings = ax_get_settings,
- .set_settings = ax_set_settings,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
.get_msglevel = ax_get_msglevel,
.set_msglevel = ax_set_msglevel,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
#ifdef CONFIG_AX88796_93CX6
@@ -936,7 +910,8 @@ static int ax_probe(struct platform_device *pdev)
iounmap(ax->map2);
exit_mem2:
- release_mem_region(mem2->start, mem2_size);
+ if (mem2)
+ release_mem_region(mem2->start, mem2_size);
exit_mem1:
iounmap(ei_local->mem);
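
Beyond the trans_start conversion, the ax88796 hunks above drop the driver's get_settings/set_settings wrappers and point the ethtool_ops at the generic phylib ksettings helpers. A condensed sketch of those helpers as found in drivers/net/phy/phy.c of this era (locking and EXPORT_SYMBOL lines elided) shows why the private phy_dev pointer can go: they resolve the PHY through dev->phydev themselves.

int phy_ethtool_get_link_ksettings(struct net_device *ndev,
				   struct ethtool_link_ksettings *cmd)
{
	struct phy_device *phydev = ndev->phydev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_ksettings_get(phydev, cmd);
}

int phy_ethtool_set_link_ksettings(struct net_device *ndev,
				   const struct ethtool_link_ksettings *cmd)
{
	struct phy_device *phydev = ndev->phydev;

	if (!phydev)
		return -ENODEV;

	return phy_ethtool_ksettings_set(phydev, cmd);
}

Note this also preserves the missing-PHY check (-ENODEV) that each driver used to open-code in its own wrapper.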
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index ec6eac1f8c95..4ea717d68c95 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -1041,7 +1041,7 @@ static netdev_tx_t axnet_start_xmit(struct sk_buff *skb,
{
ei_local->txing = 1;
NS8390_trigger_send(dev, send_length, output_page);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
if (output_page == ei_local->tx_start_page)
{
ei_local->tx1 = -1;
@@ -1270,7 +1270,7 @@ static void ei_tx_intr(struct net_device *dev)
{
ei_local->txing = 1;
NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
ei_local->tx2 = -1,
ei_local->lasttx = 2;
}
@@ -1287,7 +1287,7 @@ static void ei_tx_intr(struct net_device *dev)
{
ei_local->txing = 1;
NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
ei_local->tx1 = -1;
ei_local->lasttx = 1;
}
diff --git a/drivers/net/ethernet/8390/lib8390.c b/drivers/net/ethernet/8390/lib8390.c
index b96e8852b2d1..60f8e2c8e726 100644
--- a/drivers/net/ethernet/8390/lib8390.c
+++ b/drivers/net/ethernet/8390/lib8390.c
@@ -596,7 +596,7 @@ static void ei_tx_intr(struct net_device *dev)
if (ei_local->tx2 > 0) {
ei_local->txing = 1;
NS8390_trigger_send(dev, ei_local->tx2, ei_local->tx_start_page + 6);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
ei_local->tx2 = -1,
ei_local->lasttx = 2;
} else
@@ -609,7 +609,7 @@ static void ei_tx_intr(struct net_device *dev)
if (ei_local->tx1 > 0) {
ei_local->txing = 1;
NS8390_trigger_send(dev, ei_local->tx1, ei_local->tx_start_page);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
ei_local->tx1 = -1;
ei_local->lasttx = 1;
} else
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index ac7288240d55..1d1069641d81 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -1129,7 +1129,7 @@ static void tx_timeout(struct net_device *dev)
/* Trigger an immediate transmit demand. */
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
dev->stats.tx_errors++;
netif_wake_queue(dev);
}
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index 74139cb7f849..38eaea18da23 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -310,7 +310,7 @@ static int bfin_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
static void bfin_mac_adjust_link(struct net_device *dev)
{
struct bfin_mac_local *lp = netdev_priv(dev);
- struct phy_device *phydev = lp->phydev;
+ struct phy_device *phydev = dev->phydev;
unsigned long flags;
int new_state = 0;
@@ -430,7 +430,6 @@ static int mii_probe(struct net_device *dev, int phy_mode)
lp->old_link = 0;
lp->old_speed = 0;
lp->old_duplex = -1;
- lp->phydev = phydev;
phy_attached_print(phydev, "mdc_clk=%dHz(mdc_div=%d)@sclk=%dMHz)\n",
MDC_CLK, mdc_div, sclk / 1000000);
@@ -450,31 +449,6 @@ static irqreturn_t bfin_mac_wake_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int
-bfin_mac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct bfin_mac_local *lp = netdev_priv(dev);
-
- if (lp->phydev)
- return phy_ethtool_gset(lp->phydev, cmd);
-
- return -EINVAL;
-}
-
-static int
-bfin_mac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct bfin_mac_local *lp = netdev_priv(dev);
-
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- if (lp->phydev)
- return phy_ethtool_sset(lp->phydev, cmd);
-
- return -EINVAL;
-}
-
static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
@@ -552,8 +526,6 @@ static int bfin_mac_ethtool_get_ts_info(struct net_device *dev,
#endif
static const struct ethtool_ops bfin_mac_ethtool_ops = {
- .get_settings = bfin_mac_ethtool_getsettings,
- .set_settings = bfin_mac_ethtool_setsettings,
.get_link = ethtool_op_get_link,
.get_drvinfo = bfin_mac_ethtool_getdrvinfo,
.get_wol = bfin_mac_ethtool_getwol,
@@ -561,6 +533,8 @@ static const struct ethtool_ops bfin_mac_ethtool_ops = {
#ifdef CONFIG_BFIN_MAC_USE_HWSTAMP
.get_ts_info = bfin_mac_ethtool_get_ts_info,
#endif
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
/**************************************************************************/
@@ -1427,10 +1401,10 @@ static void bfin_mac_timeout(struct net_device *dev)
if (netif_queue_stopped(dev))
netif_wake_queue(dev);
- bfin_mac_enable(lp->phydev);
+ bfin_mac_enable(dev->phydev);
/* We can accept TX packets again */
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
}
static void bfin_mac_multicast_hash(struct net_device *dev)
@@ -1491,8 +1465,6 @@ static void bfin_mac_set_multicast_list(struct net_device *dev)
static int bfin_mac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
- struct bfin_mac_local *lp = netdev_priv(netdev);
-
if (!netif_running(netdev))
return -EINVAL;
@@ -1502,8 +1474,8 @@ static int bfin_mac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
case SIOCGHWTSTAMP:
return bfin_mac_hwtstamp_get(netdev, ifr);
default:
- if (lp->phydev)
- return phy_mii_ioctl(lp->phydev, ifr, cmd);
+ if (netdev->phydev)
+ return phy_mii_ioctl(netdev->phydev, ifr, cmd);
else
return -EOPNOTSUPP;
}
@@ -1547,12 +1519,12 @@ static int bfin_mac_open(struct net_device *dev)
if (ret)
return ret;
- phy_start(lp->phydev);
+ phy_start(dev->phydev);
setup_system_regs(dev);
setup_mac_addr(dev->dev_addr);
bfin_mac_disable();
- ret = bfin_mac_enable(lp->phydev);
+ ret = bfin_mac_enable(dev->phydev);
if (ret)
return ret;
pr_debug("hardware init finished\n");
@@ -1578,8 +1550,8 @@ static int bfin_mac_close(struct net_device *dev)
napi_disable(&lp->napi);
netif_carrier_off(dev);
- phy_stop(lp->phydev);
- phy_write(lp->phydev, MII_BMCR, BMCR_PDOWN);
+ phy_stop(dev->phydev);
+ phy_write(dev->phydev, MII_BMCR, BMCR_PDOWN);
/* clear everything */
bfin_mac_shutdown(dev);
diff --git a/drivers/net/ethernet/adi/bfin_mac.h b/drivers/net/ethernet/adi/bfin_mac.h
index d1217db70db4..8c3b56198e4b 100644
--- a/drivers/net/ethernet/adi/bfin_mac.h
+++ b/drivers/net/ethernet/adi/bfin_mac.h
@@ -92,7 +92,6 @@ struct bfin_mac_local {
int old_speed;
int old_duplex;
- struct phy_device *phydev;
struct mii_bus *mii_bus;
#if defined(CONFIG_BFIN_MAC_USE_HWSTAMP)
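
bfin_mac repeats the same pattern: the private phydev field in struct bfin_mac_local is deleted because phylib already publishes the attached PHY through net_device. A heavily abridged sketch of the attach path (phy_attach_direct() in drivers/net/phy/phy_device.c; only the relevant assignments shown, everything else elided) illustrates the invariant these patches rely on:

static int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
			     u32 flags, phy_interface_t interface)
{
	/* ... driver binding and sanity checks elided ... */
	phydev->attached_dev = dev;
	dev->phydev = phydev;	/* the pointer all the converted code now uses */
	/* ... interface mode and initial state setup elided ... */
	return 0;
}

So after a successful phy_connect()/of_phy_connect(), dev->phydev stays valid until phy_disconnect(), and a second per-driver copy of the pointer is pure redundancy.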
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index b873531c5575..bca07c5c94bd 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1323,7 +1323,7 @@ static inline int phy_aneg_done(struct phy_device *phydev)
static int greth_mdio_init(struct greth_private *greth)
{
- int ret, phy;
+ int ret;
unsigned long timeout;
greth->mdio = mdiobus_alloc();
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 0907ab6ff309..c83ebae73d91 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -440,7 +440,6 @@ struct et131x_adapter {
struct net_device *netdev;
struct pci_dev *pdev;
struct mii_bus *mii_bus;
- struct phy_device *phydev;
struct napi_struct napi;
/* Flags that indicate current state of the adapter */
@@ -864,7 +863,7 @@ static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
{
int32_t delay = 0;
struct mac_regs __iomem *mac = &adapter->regs->mac;
- struct phy_device *phydev = adapter->phydev;
+ struct phy_device *phydev = adapter->netdev->phydev;
u32 cfg1;
u32 cfg2;
u32 ifctrl;
@@ -1035,7 +1034,7 @@ static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
{
struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
- struct phy_device *phydev = adapter->phydev;
+ struct phy_device *phydev = adapter->netdev->phydev;
u32 sa_lo;
u32 sa_hi = 0;
u32 pf_ctrl = 0;
@@ -1230,7 +1229,7 @@ out:
static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
{
- struct phy_device *phydev = adapter->phydev;
+ struct phy_device *phydev = adapter->netdev->phydev;
if (!phydev)
return -EIO;
@@ -1311,7 +1310,7 @@ static void et1310_phy_read_mii_bit(struct et131x_adapter *adapter,
static void et1310_config_flow_control(struct et131x_adapter *adapter)
{
- struct phy_device *phydev = adapter->phydev;
+ struct phy_device *phydev = adapter->netdev->phydev;
if (phydev->duplex == DUPLEX_HALF) {
adapter->flow = FLOW_NONE;
@@ -1456,7 +1455,7 @@ static int et131x_mdio_write(struct mii_bus *bus, int phy_addr,
static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down)
{
u16 data;
- struct phy_device *phydev = adapter->phydev;
+ struct phy_device *phydev = adapter->netdev->phydev;
et131x_mii_read(adapter, MII_BMCR, &data);
data &= ~BMCR_PDOWN;
@@ -1469,7 +1468,7 @@ static void et1310_phy_power_switch(struct et131x_adapter *adapter, bool down)
static void et131x_xcvr_init(struct et131x_adapter *adapter)
{
u16 lcr2;
- struct phy_device *phydev = adapter->phydev;
+ struct phy_device *phydev = adapter->netdev->phydev;
/* Set the LED behavior such that LED 1 indicates speed (off =
* 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates
@@ -2111,7 +2110,7 @@ static int et131x_init_recv(struct et131x_adapter *adapter)
/* et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate */
static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
{
- struct phy_device *phydev = adapter->phydev;
+ struct phy_device *phydev = adapter->netdev->phydev;
/* For version B silicon, we do not use the RxDMA timer for 10 and 100
* Mbits/s line rates. We do not enable and RxDMA interrupt coalescing.
@@ -2426,7 +2425,7 @@ static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
struct sk_buff *skb = tcb->skb;
u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
- struct phy_device *phydev = adapter->phydev;
+ struct phy_device *phydev = adapter->netdev->phydev;
dma_addr_t dma_addr;
struct tx_ring *tx_ring = &adapter->tx_ring;
@@ -2791,22 +2790,6 @@ static void et131x_handle_send_pkts(struct et131x_adapter *adapter)
spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
}
-static int et131x_get_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
-{
- struct et131x_adapter *adapter = netdev_priv(netdev);
-
- return phy_ethtool_gset(adapter->phydev, cmd);
-}
-
-static int et131x_set_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
-{
- struct et131x_adapter *adapter = netdev_priv(netdev);
-
- return phy_ethtool_sset(adapter->phydev, cmd);
-}
-
static int et131x_get_regs_len(struct net_device *netdev)
{
#define ET131X_REGS_LEN 256
@@ -2979,12 +2962,12 @@ static void et131x_get_drvinfo(struct net_device *netdev,
}
static struct ethtool_ops et131x_ethtool_ops = {
- .get_settings = et131x_get_settings,
- .set_settings = et131x_set_settings,
.get_drvinfo = et131x_get_drvinfo,
.get_regs_len = et131x_get_regs_len,
.get_regs = et131x_get_regs,
.get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
/* et131x_hwaddr_init - set up the MAC Address */
@@ -3098,7 +3081,7 @@ err_out:
static void et131x_error_timer_handler(unsigned long data)
{
struct et131x_adapter *adapter = (struct et131x_adapter *)data;
- struct phy_device *phydev = adapter->phydev;
+ struct phy_device *phydev = adapter->netdev->phydev;
if (et1310_in_phy_coma(adapter)) {
/* Bring the device immediately out of coma, to
@@ -3168,7 +3151,7 @@ static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
static void et131x_adjust_link(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
- struct phy_device *phydev = adapter->phydev;
+ struct phy_device *phydev = netdev->phydev;
if (!phydev)
return;
@@ -3287,7 +3270,6 @@ static int et131x_mii_probe(struct net_device *netdev)
phydev->advertising = phydev->supported;
phydev->autoneg = AUTONEG_ENABLE;
- adapter->phydev = phydev;
phy_attached_info(phydev);
@@ -3323,7 +3305,7 @@ static void et131x_pci_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
netif_napi_del(&adapter->napi);
- phy_disconnect(adapter->phydev);
+ phy_disconnect(netdev->phydev);
mdiobus_unregister(adapter->mii_bus);
mdiobus_free(adapter->mii_bus);
@@ -3338,20 +3320,16 @@ static void et131x_pci_remove(struct pci_dev *pdev)
static void et131x_up(struct net_device *netdev)
{
- struct et131x_adapter *adapter = netdev_priv(netdev);
-
et131x_enable_txrx(netdev);
- phy_start(adapter->phydev);
+ phy_start(netdev->phydev);
}
static void et131x_down(struct net_device *netdev)
{
- struct et131x_adapter *adapter = netdev_priv(netdev);
-
/* Save the timestamp for the TX watchdog, prevent a timeout */
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
- phy_stop(adapter->phydev);
+ phy_stop(netdev->phydev);
et131x_disable_txrx(netdev);
}
@@ -3684,12 +3662,10 @@ static int et131x_close(struct net_device *netdev)
static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf,
int cmd)
{
- struct et131x_adapter *adapter = netdev_priv(netdev);
-
- if (!adapter->phydev)
+ if (!netdev->phydev)
return -EINVAL;
- return phy_mii_ioctl(adapter->phydev, reqbuf, cmd);
+ return phy_mii_ioctl(netdev->phydev, reqbuf, cmd);
}
/* et131x_set_packet_filter - Configures the Rx Packet filtering */
@@ -3816,7 +3792,7 @@ static netdev_tx_t et131x_tx(struct sk_buff *skb, struct net_device *netdev)
netif_stop_queue(netdev);
/* Save the timestamp for the TX timeout watchdog */
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
/* TCB is not available */
if (tx_ring->used >= NUM_TCB)
@@ -3851,7 +3827,7 @@ static void et131x_tx_timeout(struct net_device *netdev)
unsigned long flags;
/* If the device is closed, ignore the timeout */
- if (~(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
+ if (!(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE))
return;
/* Any nonrecoverable hardware error?
@@ -4073,7 +4049,7 @@ out:
return rc;
err_phy_disconnect:
- phy_disconnect(adapter->phydev);
+ phy_disconnect(netdev->phydev);
err_mdio_unregister:
mdiobus_unregister(adapter->mii_bus);
err_mdio_free:
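
One hunk in et131x.c above is a genuine bug fix rather than an API conversion: et131x_tx_timeout() tested ~(adapter->flags & FMP_ADAPTER_INTERRUPT_IN_USE), and bitwise NOT of the masked word is nonzero whether or not the flag is set, so the "device closed" early return never fired. A stand-alone illustration (the flag value here is made up for the demo):

#include <stdio.h>

#define FMP_ADAPTER_INTERRUPT_IN_USE 0x00000008u	/* illustrative value */

int main(void)
{
	unsigned int closed = 0;				/* flag clear */
	unsigned int open = FMP_ADAPTER_INTERRUPT_IN_USE;	/* flag set */

	/* old test: ~(x & flag) is truthy in both cases */
	printf("~: closed=%d open=%d\n",
	       ~(closed & FMP_ADAPTER_INTERRUPT_IN_USE) ? 1 : 0,
	       ~(open & FMP_ADAPTER_INTERRUPT_IN_USE) ? 1 : 0);

	/* new test: !(x & flag) is true only when the flag is clear */
	printf("!: closed=%d open=%d\n",
	       !(closed & FMP_ADAPTER_INTERRUPT_IN_USE),
	       !(open & FMP_ADAPTER_INTERRUPT_IN_USE));
	return 0;
}

This prints "~: closed=1 open=1" and "!: closed=1 open=0", confirming the old condition carried no information about the adapter state.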
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 8d50314ac3eb..6ffdff68bfc4 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -77,7 +77,6 @@ struct emac_board_info {
int emacrx_completed_flag;
- struct phy_device *phy_dev;
struct device_node *phy_node;
unsigned int link;
unsigned int speed;
@@ -115,7 +114,7 @@ static void emac_update_duplex(struct net_device *dev)
static void emac_handle_link_change(struct net_device *dev)
{
struct emac_board_info *db = netdev_priv(dev);
- struct phy_device *phydev = db->phy_dev;
+ struct phy_device *phydev = dev->phydev;
unsigned long flags;
int status_change = 0;
@@ -154,21 +153,22 @@ static void emac_handle_link_change(struct net_device *dev)
static int emac_mdio_probe(struct net_device *dev)
{
struct emac_board_info *db = netdev_priv(dev);
+ struct phy_device *phydev;
/* to-do: PHY interrupts are currently not supported */
/* attach the mac to the phy */
- db->phy_dev = of_phy_connect(db->ndev, db->phy_node,
- &emac_handle_link_change, 0,
- db->phy_interface);
- if (!db->phy_dev) {
+ phydev = of_phy_connect(db->ndev, db->phy_node,
+ &emac_handle_link_change, 0,
+ db->phy_interface);
+ if (!phydev) {
netdev_err(db->ndev, "could not find the PHY\n");
return -ENODEV;
}
/* mask with MAC supported features */
- db->phy_dev->supported &= PHY_BASIC_FEATURES;
- db->phy_dev->advertising = db->phy_dev->supported;
+ phydev->supported &= PHY_BASIC_FEATURES;
+ phydev->advertising = phydev->supported;
db->link = 0;
db->speed = 0;
@@ -179,10 +179,7 @@ static int emac_mdio_probe(struct net_device *dev)
static void emac_mdio_remove(struct net_device *dev)
{
- struct emac_board_info *db = netdev_priv(dev);
-
- phy_disconnect(db->phy_dev);
- db->phy_dev = NULL;
+ phy_disconnect(dev->phydev);
}
static void emac_reset(struct emac_board_info *db)
@@ -208,8 +205,7 @@ static void emac_inblk_32bit(void __iomem *reg, void *data, int count)
static int emac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct emac_board_info *dm = netdev_priv(dev);
- struct phy_device *phydev = dm->phy_dev;
+ struct phy_device *phydev = dev->phydev;
if (!netif_running(dev))
return -EINVAL;
@@ -229,33 +225,11 @@ static void emac_get_drvinfo(struct net_device *dev,
strlcpy(info->bus_info, dev_name(&dev->dev), sizeof(info->bus_info));
}
-static int emac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct emac_board_info *dm = netdev_priv(dev);
- struct phy_device *phydev = dm->phy_dev;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_ethtool_gset(phydev, cmd);
-}
-
-static int emac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct emac_board_info *dm = netdev_priv(dev);
- struct phy_device *phydev = dm->phy_dev;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_ethtool_sset(phydev, cmd);
-}
-
static const struct ethtool_ops emac_ethtool_ops = {
.get_drvinfo = emac_get_drvinfo,
- .get_settings = emac_get_settings,
- .set_settings = emac_set_settings,
.get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static unsigned int emac_setup(struct net_device *ndev)
@@ -428,7 +402,7 @@ static void emac_timeout(struct net_device *dev)
emac_reset(db);
emac_init_device(dev);
/* We can accept TX packets again */
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
netif_wake_queue(dev);
/* Restore previous register address */
@@ -468,7 +442,7 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
db->membase + EMAC_TX_CTL0_REG);
/* save the time stamp */
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
} else if (channel == 1) {
/* set TX len */
writel(skb->len, db->membase + EMAC_TX_PL1_REG);
@@ -477,7 +451,7 @@ static int emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
db->membase + EMAC_TX_CTL1_REG);
/* save the time stamp */
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
}
if ((db->tx_fifo_stat & 3) == 3) {
@@ -744,7 +718,7 @@ static int emac_open(struct net_device *dev)
return ret;
}
- phy_start(db->phy_dev);
+ phy_start(dev->phydev);
netif_start_queue(dev);
return 0;
@@ -781,7 +755,7 @@ static int emac_stop(struct net_device *ndev)
netif_stop_queue(ndev);
netif_carrier_off(ndev);
- phy_stop(db->phy_dev);
+ phy_stop(ndev->phydev);
emac_mdio_remove(ndev);
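/*
 * Illustrative sketch (not part of the patch): the conversions above all
 * follow one pattern.  phylib already stores the attached PHY in
 * net_device->phydev once of_phy_connect()/phy_connect() succeeds, so a
 * driver can drop its private phy_dev pointer, and its hand-rolled
 * get_settings/set_settings handlers collapse into the generic phylib
 * ksettings helpers:
 */
static const struct ethtool_ops example_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};
/*
 * The helpers check ndev->phydev for NULL and return -ENODEV themselves,
 * which is why the per-driver NULL checks can go away.
 */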
diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h
index 103c30ddddf7..e0052003d16f 100644
--- a/drivers/net/ethernet/altera/altera_tse.h
+++ b/drivers/net/ethernet/altera/altera_tse.h
@@ -473,7 +473,6 @@ struct altera_tse_private {
int phy_addr; /* PHY's MDIO address, -1 for autodetection */
phy_interface_t phy_iface;
struct mii_bus *mdio;
- struct phy_device *phydev;
int oldspeed;
int oldduplex;
int oldlink;
diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c
index be72e1e64525..7c367713c3e6 100644
--- a/drivers/net/ethernet/altera/altera_tse_ethtool.c
+++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c
@@ -233,40 +233,18 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
buf[i] = csrrd32(priv->mac_dev, i * 4);
}
-static int tse_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct altera_tse_private *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
-
- if (phydev == NULL)
- return -ENODEV;
-
- return phy_ethtool_gset(phydev, cmd);
-}
-
-static int tse_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct altera_tse_private *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
-
- if (phydev == NULL)
- return -ENODEV;
-
- return phy_ethtool_sset(phydev, cmd);
-}
-
static const struct ethtool_ops tse_ethtool_ops = {
.get_drvinfo = tse_get_drvinfo,
.get_regs_len = tse_reglen,
.get_regs = tse_get_regs,
.get_link = ethtool_op_get_link,
- .get_settings = tse_get_settings,
- .set_settings = tse_set_settings,
.get_strings = tse_gstrings,
.get_sset_count = tse_sset_count,
.get_ethtool_stats = tse_fill_stats,
.get_msglevel = tse_get_msglevel,
.set_msglevel = tse_set_msglevel,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
void altera_tse_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index f749e4d389eb..bda31f308cc2 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -625,7 +625,7 @@ out:
static void altera_tse_adjust_link(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
+ struct phy_device *phydev = dev->phydev;
int new_state = 0;
/* only change config if there is a link */
@@ -815,6 +815,7 @@ static int init_phy(struct net_device *dev)
phydev = of_phy_connect(dev, phynode,
&altera_tse_adjust_link, 0, priv->phy_iface);
}
+ of_node_put(phynode);
if (!phydev) {
netdev_err(dev, "Could not find the PHY\n");
@@ -845,7 +846,6 @@ static int init_phy(struct net_device *dev)
netdev_dbg(dev, "attached to PHY %d UID 0x%08x Link = %d\n",
phydev->mdio.addr, phydev->phy_id, phydev->link);
- priv->phydev = phydev;
return 0;
}
@@ -1172,8 +1172,8 @@ static int tse_open(struct net_device *dev)
spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
- if (priv->phydev)
- phy_start(priv->phydev);
+ if (dev->phydev)
+ phy_start(dev->phydev);
napi_enable(&priv->napi);
netif_start_queue(dev);
@@ -1205,8 +1205,8 @@ static int tse_shutdown(struct net_device *dev)
unsigned long int flags;
/* Stop the PHY */
- if (priv->phydev)
- phy_stop(priv->phydev);
+ if (dev->phydev)
+ phy_stop(dev->phydev);
netif_stop_queue(dev);
napi_disable(&priv->napi);
@@ -1545,10 +1545,9 @@ err_free_netdev:
static int altera_tse_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
- struct altera_tse_private *priv = netdev_priv(ndev);
- if (priv->phydev)
- phy_disconnect(priv->phydev);
+ if (ndev->phydev)
+ phy_disconnect(ndev->phydev);
platform_set_drvdata(pdev, NULL);
altera_tse_mdio_destroy(ndev);
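/*
 * Illustrative sketch (not part of the patch), showing why the
 * of_node_put() above is needed: of_parse_phandle() returns the PHY node
 * with its refcount elevated, and of_phy_connect() takes its own
 * reference, so the caller must drop the phandle reference or the node
 * leaks.  example_adjust_link() and example_connect() are placeholders.
 */
static void example_adjust_link(struct net_device *ndev)
{
	/* placeholder link-change handler */
}

static struct phy_device *example_connect(struct net_device *ndev,
					  struct device_node *of_node)
{
	struct device_node *np;
	struct phy_device *phydev;

	np = of_parse_phandle(of_node, "phy-handle", 0);
	if (!np)
		return NULL;

	phydev = of_phy_connect(ndev, np, example_adjust_link, 0,
				PHY_INTERFACE_MODE_RGMII);
	of_node_put(np);	/* balance of_parse_phandle() */
	return phydev;
}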
diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c
index 66d0b73c39c0..dcf2a1f3643d 100644
--- a/drivers/net/ethernet/amd/7990.c
+++ b/drivers/net/ethernet/amd/7990.c
@@ -260,7 +260,7 @@ static int lance_reset(struct net_device *dev)
load_csrs(lp);
lance_init_ring(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
status = init_restart_lance(lp);
#ifdef DEBUG_DRIVER
printk("Lance restart=%d\n", status);
@@ -530,7 +530,7 @@ void lance_tx_timeout(struct net_device *dev)
{
printk("lance_tx_timeout\n");
lance_reset(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
EXPORT_SYMBOL_GPL(lance_tx_timeout);
@@ -543,11 +543,13 @@ int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
static int outs;
unsigned long flags;
- if (!TX_BUFFS_AVAIL)
- return NETDEV_TX_LOCKED;
-
netif_stop_queue(dev);
+ if (!TX_BUFFS_AVAIL) {
+ dev_consume_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
skblen = skb->len;
#ifdef DEBUG_DRIVER
diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c
index 56139184b801..a83cd1c4ce1d 100644
--- a/drivers/net/ethernet/amd/a2065.c
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -512,7 +512,7 @@ static inline int lance_reset(struct net_device *dev)
load_csrs(lp);
lance_init_ring(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_start_queue(dev);
status = init_restart_lance(lp);
@@ -547,10 +547,8 @@ static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
local_irq_save(flags);
- if (!lance_tx_buffs_avail(lp)) {
- local_irq_restore(flags);
- return NETDEV_TX_LOCKED;
- }
+ if (!lance_tx_buffs_avail(lp))
+ goto out_free;
#ifdef DEBUG
/* dump the packet */
@@ -573,6 +571,7 @@ static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
/* Kick the lance: transmit now */
ll->rdp = LE_C0_INEA | LE_C0_TDMD;
+ out_free:
dev_kfree_skb(skb);
local_irq_restore(flags);
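/*
 * Illustrative sketch (not part of the patch): NETDEV_TX_LOCKED is being
 * retired along with LLTX.  Returning it made the core requeue the skb
 * and retry, which can livelock when the ring genuinely has no room; the
 * replacement pattern, as in the two LANCE hunks above, is for the
 * driver to drop the packet itself and report NETDEV_TX_OK.
 * example_tx_buffs_avail() is a placeholder ring-space check.
 */
static bool example_tx_buffs_avail(struct net_device *dev)
{
	return true;	/* placeholder: test descriptor-ring occupancy */
}

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	if (!example_tx_buffs_avail(dev)) {
		dev_consume_skb_any(skb);	/* the skb is ours to free */
		return NETDEV_TX_OK;
	}

	/* ... map and queue the frame to hardware ... */
	return NETDEV_TX_OK;
}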
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index b10964e8cb54..d2bc8e5dcd23 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -764,7 +764,7 @@ static void lance_tx_timeout (struct net_device *dev)
/* lance_restart, essentially */
lance_init_ring(dev);
REGA( CSR0 ) = CSR0_INEA | CSR0_INIT | CSR0_STRT;
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index d3977d032b48..df664187cd82 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -412,13 +412,13 @@ static void
au1000_adjust_link(struct net_device *dev)
{
struct au1000_private *aup = netdev_priv(dev);
- struct phy_device *phydev = aup->phy_dev;
+ struct phy_device *phydev = dev->phydev;
unsigned long flags;
u32 reg;
int status_change = 0;
- BUG_ON(!aup->phy_dev);
+ BUG_ON(!phydev);
spin_lock_irqsave(&aup->lock, flags);
@@ -509,8 +509,8 @@ static int au1000_mii_probe(struct net_device *dev)
* on the current MAC's MII bus
*/
for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++)
- if (mdiobus_get_phy(aup->mii_bus, aup->phy_addr)) {
- phydev = mdiobus_get_phy(aup->mii_bus, aup->phy_addr);
+ if (mdiobus_get_phy(aup->mii_bus, phy_addr)) {
+ phydev = mdiobus_get_phy(aup->mii_bus, phy_addr);
if (!aup->phy_search_highest_addr)
/* break out with first one found */
break;
@@ -579,7 +579,6 @@ static int au1000_mii_probe(struct net_device *dev)
aup->old_link = 0;
aup->old_speed = 0;
aup->old_duplex = -1;
- aup->phy_dev = phydev;
phy_attached_info(phydev);
@@ -678,29 +677,6 @@ au1000_setup_hw_rings(struct au1000_private *aup, void __iomem *tx_base)
* ethtool operations
*/
-static int au1000_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct au1000_private *aup = netdev_priv(dev);
-
- if (aup->phy_dev)
- return phy_ethtool_gset(aup->phy_dev, cmd);
-
- return -EINVAL;
-}
-
-static int au1000_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct au1000_private *aup = netdev_priv(dev);
-
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- if (aup->phy_dev)
- return phy_ethtool_sset(aup->phy_dev, cmd);
-
- return -EINVAL;
-}
-
static void
au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
@@ -725,12 +701,12 @@ static u32 au1000_get_msglevel(struct net_device *dev)
}
static const struct ethtool_ops au1000_ethtool_ops = {
- .get_settings = au1000_get_settings,
- .set_settings = au1000_set_settings,
.get_drvinfo = au1000_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_msglevel = au1000_get_msglevel,
.set_msglevel = au1000_set_msglevel,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
@@ -778,8 +754,8 @@ static int au1000_init(struct net_device *dev)
#ifndef CONFIG_CPU_LITTLE_ENDIAN
control |= MAC_BIG_ENDIAN;
#endif
- if (aup->phy_dev) {
- if (aup->phy_dev->link && (DUPLEX_FULL == aup->phy_dev->duplex))
+ if (dev->phydev) {
+ if (dev->phydev->link && (DUPLEX_FULL == dev->phydev->duplex))
control |= MAC_FULL_DUPLEX;
else
control |= MAC_DISABLE_RX_OWN;
@@ -891,11 +867,10 @@ static int au1000_rx(struct net_device *dev)
static void au1000_update_tx_stats(struct net_device *dev, u32 status)
{
- struct au1000_private *aup = netdev_priv(dev);
struct net_device_stats *ps = &dev->stats;
if (status & TX_FRAME_ABORTED) {
- if (!aup->phy_dev || (DUPLEX_FULL == aup->phy_dev->duplex)) {
+ if (!dev->phydev || (DUPLEX_FULL == dev->phydev->duplex)) {
if (status & (TX_JAB_TIMEOUT | TX_UNDERRUN)) {
/* any other tx errors are only valid
* in half duplex mode
@@ -975,10 +950,10 @@ static int au1000_open(struct net_device *dev)
return retval;
}
- if (aup->phy_dev) {
+ if (dev->phydev) {
/* cause the PHY state machine to schedule a link state check */
- aup->phy_dev->state = PHY_CHANGELINK;
- phy_start(aup->phy_dev);
+ dev->phydev->state = PHY_CHANGELINK;
+ phy_start(dev->phydev);
}
netif_start_queue(dev);
@@ -995,8 +970,8 @@ static int au1000_close(struct net_device *dev)
netif_dbg(aup, drv, dev, "close: dev=%p\n", dev);
- if (aup->phy_dev)
- phy_stop(aup->phy_dev);
+ if (dev->phydev)
+ phy_stop(dev->phydev);
spin_lock_irqsave(&aup->lock, flags);
@@ -1074,7 +1049,7 @@ static void au1000_tx_timeout(struct net_device *dev)
netdev_err(dev, "au1000_tx_timeout: dev=%p\n", dev);
au1000_reset_mac(dev);
au1000_init(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -1110,15 +1085,13 @@ static void au1000_multicast_list(struct net_device *dev)
static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct au1000_private *aup = netdev_priv(dev);
-
if (!netif_running(dev))
return -EINVAL;
- if (!aup->phy_dev)
+ if (!dev->phydev)
return -EINVAL; /* PHY not controllable */
- return phy_mii_ioctl(aup->phy_dev, rq, cmd);
+ return phy_mii_ioctl(dev->phydev, rq, cmd);
}
static const struct net_device_ops au1000_netdev_ops = {
@@ -1269,7 +1242,7 @@ static int au1000_probe(struct platform_device *pdev)
aup->phy_irq = pd->phy_irq;
}
- if (aup->phy_busid && aup->phy_busid > 0) {
+ if (aup->phy_busid > 0) {
dev_err(&pdev->dev, "MAC0-associated PHY attached 2nd MACs MII bus not supported yet\n");
err = -ENODEV;
goto err_mdiobus_alloc;
diff --git a/drivers/net/ethernet/amd/au1000_eth.h b/drivers/net/ethernet/amd/au1000_eth.h
index ca53024f017f..4c47c2377d74 100644
--- a/drivers/net/ethernet/amd/au1000_eth.h
+++ b/drivers/net/ethernet/amd/au1000_eth.h
@@ -106,7 +106,6 @@ struct au1000_private {
int old_speed;
int old_duplex;
- struct phy_device *phy_dev;
struct mii_bus *mii_bus;
/* PHY configuration */
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c
index b584b78237df..b799c7ac899b 100644
--- a/drivers/net/ethernet/amd/declance.c
+++ b/drivers/net/ethernet/amd/declance.c
@@ -877,7 +877,7 @@ static inline int lance_reset(struct net_device *dev)
lance_init_ring(dev);
load_csrs(lp);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
status = init_restart_lance(lp);
return status;
}
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index 3a7ebfdda57d..abb1ba228b26 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -943,7 +943,7 @@ static void lance_tx_timeout (struct net_device *dev)
#endif
lance_restart (dev, 0x0043, 1);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue (dev);
}
diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c
index 1cf33addd15e..cda53db75f17 100644
--- a/drivers/net/ethernet/amd/ni65.c
+++ b/drivers/net/ethernet/amd/ni65.c
@@ -782,7 +782,7 @@ static void ni65_stop_start(struct net_device *dev,struct priv *p)
if(!p->lock)
if (p->tmdnum || !p->xmit_queued)
netif_wake_queue(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
}
else
writedatareg(CSR0_STRT | csr0);
@@ -1148,7 +1148,7 @@ static void ni65_timeout(struct net_device *dev)
printk("%02x ",p->tmdhead[i].u.s.status);
printk("\n");
ni65_lance_reinit(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 27245efe9f50..2807e181647b 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -851,7 +851,7 @@ static void mace_tx_timeout(struct net_device *dev)
#else /* #if RESET_ON_TIMEOUT */
pr_cont("NOT resetting card\n");
#endif /* #if RESET_ON_TIMEOUT */
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 7ccebae9cb48..c22bf52d3320 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -448,7 +448,7 @@ static void pcnet32_netif_stop(struct net_device *dev)
{
struct pcnet32_private *lp = netdev_priv(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
napi_disable(&lp->napi);
netif_tx_disable(dev);
}
@@ -2426,7 +2426,7 @@ static void pcnet32_tx_timeout(struct net_device *dev)
}
pcnet32_restart(dev, CSR0_NORMAL);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
spin_unlock_irqrestore(&lp->lock, flags);
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index 7847638bdd22..9b56b40259dc 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -997,7 +997,7 @@ static int lance_reset(struct net_device *dev)
}
lp->init_ring(dev);
load_csrs(lp);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
status = init_restart_lance(lp);
return status;
}
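/*
 * Illustrative sketch (not part of the patch): netif_trans_update()
 * replaces the open-coded "dev->trans_start = jiffies" writes seen
 * throughout these drivers.  In this kernel it is roughly the helper
 * below, stamping jiffies on TX queue 0 so the netdev watchdog does not
 * fire a spurious tx_timeout while a driver resets its hardware.
 */
static inline void example_trans_update(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	if (txq->trans_start != jiffies)
		txq->trans_start = jiffies;
}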
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index ebf9224b2d31..a9b2709567ec 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -154,7 +154,7 @@ static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
goto err_rx_ring;
for (i = 0, channel = channel_mem; i < count; i++, channel++) {
- snprintf(channel->name, sizeof(channel->name), "channel-%d", i);
+ snprintf(channel->name, sizeof(channel->name), "channel-%u", i);
channel->pdata = pdata;
channel->queue_index = i;
channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
diff --git a/drivers/net/ethernet/apm/xgene/Kconfig b/drivers/net/ethernet/apm/xgene/Kconfig
index 19e38afbc5ee..300e3b5c54e0 100644
--- a/drivers/net/ethernet/apm/xgene/Kconfig
+++ b/drivers/net/ethernet/apm/xgene/Kconfig
@@ -3,6 +3,7 @@ config NET_XGENE
depends on HAS_DMA
depends on ARCH_XGENE || COMPILE_TEST
select PHYLIB
+ select MDIO_XGENE
help
This is the Ethernet driver for the on-chip Ethernet interface on the
APM X-Gene SoC.
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c
index 11be8044e0d7..472c0fb3f4c4 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.c
@@ -730,6 +730,6 @@ static int xgene_enet_cle_init(struct xgene_enet_pdata *pdata)
return xgene_cle_setup_ptree(pdata, enet_cle);
}
-struct xgene_cle_ops xgene_cle3in_ops = {
+const struct xgene_cle_ops xgene_cle3in_ops = {
.cle_init = xgene_enet_cle_init,
};
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h
index 3bf90683240e..33c5f6b25824 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_cle.h
@@ -292,6 +292,6 @@ struct xgene_enet_cle {
u32 jump_bytes;
};
-extern struct xgene_cle_ops xgene_cle3in_ops;
+extern const struct xgene_cle_ops xgene_cle3in_ops;
#endif /* __XGENE_ENET_CLE_H__ */
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
index 416d6ebfc2ce..22a7b26ca1d6 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ethtool.c
@@ -65,8 +65,15 @@ static int xgene_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
return phy_ethtool_gset(phydev, cmd);
} else if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
- cmd->supported = SUPPORTED_1000baseT_Full |
- SUPPORTED_Autoneg | SUPPORTED_MII;
+ if (pdata->mdio_driver) {
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_ethtool_gset(phydev, cmd);
+ }
+
+ cmd->supported = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
+ SUPPORTED_MII;
cmd->advertising = cmd->supported;
ethtool_cmd_speed_set(cmd, SPEED_1000);
cmd->duplex = DUPLEX_FULL;
@@ -92,12 +99,21 @@ static int xgene_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
struct phy_device *phydev = pdata->phy_dev;
if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
- if (phydev == NULL)
+ if (!phydev)
return -ENODEV;
return phy_ethtool_sset(phydev, cmd);
}
+ if (pdata->phy_mode == PHY_INTERFACE_MODE_SGMII) {
+ if (pdata->mdio_driver) {
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_ethtool_sset(phydev, cmd);
+ }
+ }
+
return -EINVAL;
}
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 513d2a62ee6d..18bb9556dd00 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -381,59 +381,6 @@ static void xgene_enet_rd_mcx_mac(struct xgene_enet_pdata *pdata,
rd_addr);
}
-static int xgene_mii_phy_write(struct xgene_enet_pdata *pdata, int phy_id,
- u32 reg, u16 data)
-{
- u32 addr = 0, wr_data = 0;
- u32 done;
- u8 wait = 10;
-
- PHY_ADDR_SET(&addr, phy_id);
- REG_ADDR_SET(&addr, reg);
- xgene_enet_wr_mcx_mac(pdata, MII_MGMT_ADDRESS_ADDR, addr);
-
- PHY_CONTROL_SET(&wr_data, data);
- xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONTROL_ADDR, wr_data);
- do {
- usleep_range(5, 10);
- xgene_enet_rd_mcx_mac(pdata, MII_MGMT_INDICATORS_ADDR, &done);
- } while ((done & BUSY_MASK) && wait--);
-
- if (done & BUSY_MASK) {
- netdev_err(pdata->ndev, "MII_MGMT write failed\n");
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int xgene_mii_phy_read(struct xgene_enet_pdata *pdata,
- u8 phy_id, u32 reg)
-{
- u32 addr = 0;
- u32 data, done;
- u8 wait = 10;
-
- PHY_ADDR_SET(&addr, phy_id);
- REG_ADDR_SET(&addr, reg);
- xgene_enet_wr_mcx_mac(pdata, MII_MGMT_ADDRESS_ADDR, addr);
- xgene_enet_wr_mcx_mac(pdata, MII_MGMT_COMMAND_ADDR, READ_CYCLE_MASK);
- do {
- usleep_range(5, 10);
- xgene_enet_rd_mcx_mac(pdata, MII_MGMT_INDICATORS_ADDR, &done);
- } while ((done & BUSY_MASK) && wait--);
-
- if (done & BUSY_MASK) {
- netdev_err(pdata->ndev, "MII_MGMT read failed\n");
- return -EBUSY;
- }
-
- xgene_enet_rd_mcx_mac(pdata, MII_MGMT_STATUS_ADDR, &data);
- xgene_enet_wr_mcx_mac(pdata, MII_MGMT_COMMAND_ADDR, 0);
-
- return data;
-}
-
static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata)
{
u32 addr0, addr1;
@@ -512,14 +459,11 @@ static void xgene_enet_configure_clock(struct xgene_enet_pdata *pdata)
#endif
}
-static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
+static void xgene_gmac_set_speed(struct xgene_enet_pdata *pdata)
{
struct device *dev = &pdata->pdev->dev;
- u32 value, mc2;
- u32 intf_ctl, rgmii;
- u32 icm0, icm2;
-
- xgene_gmac_reset(pdata);
+ u32 icm0, icm2, mc2;
+ u32 intf_ctl, rgmii, value;
xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, &icm0);
xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, &icm2);
@@ -564,7 +508,21 @@ static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
mc2 |= FULL_DUPLEX2 | PAD_CRC;
xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_2_ADDR, mc2);
xgene_enet_wr_mcx_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl);
+ xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii);
+ xgene_enet_configure_clock(pdata);
+
+ xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, icm0);
+ xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, icm2);
+}
+
+static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
+{
+ u32 value;
+
+ if (!pdata->mdio_driver)
+ xgene_gmac_reset(pdata);
+
+ xgene_gmac_set_speed(pdata);
xgene_gmac_set_mac_addr(pdata);
/* Adjust MDC clock frequency */
@@ -579,15 +537,10 @@ static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
/* Rtype should be copied from FP */
xgene_enet_wr_csr(pdata, RSIF_RAM_DBG_REG0_ADDR, 0);
- xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii);
- xgene_enet_configure_clock(pdata);
/* Rx-Tx traffic resume */
xgene_enet_wr_csr(pdata, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0);
- xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, icm0);
- xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, icm2);
-
xgene_enet_rd_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, &value);
value &= ~TX_DV_GATE_EN0;
value &= ~RX_DV_GATE_EN0;
@@ -671,93 +624,155 @@ bool xgene_ring_mgr_init(struct xgene_enet_pdata *p)
static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
{
- u32 val;
+ struct device *dev = &pdata->pdev->dev;
if (!xgene_ring_mgr_init(pdata))
return -ENODEV;
- if (!IS_ERR(pdata->clk)) {
+ if (pdata->mdio_driver) {
+ xgene_enet_config_ring_if_assoc(pdata);
+ return 0;
+ }
+
+ if (dev->of_node) {
clk_prepare_enable(pdata->clk);
+ udelay(5);
clk_disable_unprepare(pdata->clk);
+ udelay(5);
clk_prepare_enable(pdata->clk);
- xgene_enet_ecc_init(pdata);
+ udelay(5);
+ } else {
+#ifdef CONFIG_ACPI
+ if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev), "_RST")) {
+ acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
+ "_RST", NULL, NULL);
+ } else if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev),
+ "_INI")) {
+ acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
+ "_INI", NULL, NULL);
+ }
+#endif
}
- xgene_enet_config_ring_if_assoc(pdata);
- /* Enable auto-incr for scanning */
- xgene_enet_rd_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, &val);
- val |= SCAN_AUTO_INCR;
- MGMT_CLOCK_SEL_SET(&val, 1);
- xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, val);
+ xgene_enet_ecc_init(pdata);
+ xgene_enet_config_ring_if_assoc(pdata);
return 0;
}
-static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
+static void xgene_enet_clear(struct xgene_enet_pdata *pdata,
+ struct xgene_enet_desc_ring *ring)
{
- if (!IS_ERR(pdata->clk))
- clk_disable_unprepare(pdata->clk);
-}
+ u32 addr, val, data;
-static int xgene_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
-{
- struct xgene_enet_pdata *pdata = bus->priv;
- u32 val;
+ val = xgene_enet_ring_bufnum(ring->id);
- val = xgene_mii_phy_read(pdata, mii_id, regnum);
- netdev_dbg(pdata->ndev, "mdio_rd: bus=%d reg=%d val=%x\n",
- mii_id, regnum, val);
+ if (xgene_enet_is_bufpool(ring->id)) {
+ addr = ENET_CFGSSQMIFPRESET_ADDR;
+ data = BIT(val - 0x20);
+ } else {
+ addr = ENET_CFGSSQMIWQRESET_ADDR;
+ data = BIT(val);
+ }
- return val;
+ xgene_enet_wr_ring_if(pdata, addr, data);
}
-static int xgene_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
- u16 val)
+static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
{
- struct xgene_enet_pdata *pdata = bus->priv;
+ struct device *dev = &pdata->pdev->dev;
+ struct xgene_enet_desc_ring *ring;
+ u32 pb, val;
+ int i;
+
+ pb = 0;
+ for (i = 0; i < pdata->rxq_cnt; i++) {
+ ring = pdata->rx_ring[i]->buf_pool;
+
+ val = xgene_enet_ring_bufnum(ring->id);
+ pb |= BIT(val - 0x20);
+ }
+ xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPRESET_ADDR, pb);
+
+ pb = 0;
+ for (i = 0; i < pdata->txq_cnt; i++) {
+ ring = pdata->tx_ring[i];
+
+ val = xgene_enet_ring_bufnum(ring->id);
+ pb |= BIT(val);
+ }
+ xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQRESET_ADDR, pb);
- netdev_dbg(pdata->ndev, "mdio_wr: bus=%d reg=%d val=%x\n",
- mii_id, regnum, val);
- return xgene_mii_phy_write(pdata, mii_id, regnum, val);
+ if (dev->of_node) {
+ if (!IS_ERR(pdata->clk))
+ clk_disable_unprepare(pdata->clk);
+ }
}
static void xgene_enet_adjust_link(struct net_device *ndev)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+ const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
struct phy_device *phydev = pdata->phy_dev;
if (phydev->link) {
if (pdata->phy_speed != phydev->speed) {
pdata->phy_speed = phydev->speed;
- xgene_gmac_init(pdata);
- xgene_gmac_rx_enable(pdata);
- xgene_gmac_tx_enable(pdata);
+ mac_ops->set_speed(pdata);
+ mac_ops->rx_enable(pdata);
+ mac_ops->tx_enable(pdata);
phy_print_status(phydev);
}
} else {
- xgene_gmac_rx_disable(pdata);
- xgene_gmac_tx_disable(pdata);
+ mac_ops->rx_disable(pdata);
+ mac_ops->tx_disable(pdata);
pdata->phy_speed = SPEED_UNKNOWN;
phy_print_status(phydev);
}
}
-static int xgene_enet_phy_connect(struct net_device *ndev)
+#ifdef CONFIG_ACPI
+static struct acpi_device *acpi_phy_find_device(struct device *dev)
+{
+ struct acpi_reference_args args;
+ struct fwnode_handle *fw_node;
+ int status;
+
+ fw_node = acpi_fwnode_handle(ACPI_COMPANION(dev));
+ status = acpi_node_get_property_reference(fw_node, "phy-handle", 0,
+ &args);
+ if (ACPI_FAILURE(status)) {
+ dev_dbg(dev, "No matching phy in ACPI table\n");
+ return NULL;
+ }
+
+ return args.adev;
+}
+#endif
+
+int xgene_enet_phy_connect(struct net_device *ndev)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
- struct device_node *phy_np;
+ struct device_node *np;
struct phy_device *phy_dev;
struct device *dev = &pdata->pdev->dev;
+ int i;
if (dev->of_node) {
- phy_np = of_parse_phandle(dev->of_node, "phy-handle", 0);
- if (!phy_np) {
+ for (i = 0; i < 2; i++) {
+ np = of_parse_phandle(dev->of_node, "phy-handle", i);
+ if (np)
+ break;
+ }
+
+ if (!np) {
netdev_dbg(ndev, "No phy-handle found in DT\n");
return -ENODEV;
}
- phy_dev = of_phy_connect(ndev, phy_np, &xgene_enet_adjust_link,
+ phy_dev = of_phy_connect(ndev, np, &xgene_enet_adjust_link,
0, pdata->phy_mode);
+ of_node_put(np);
if (!phy_dev) {
netdev_err(ndev, "Could not connect to PHY\n");
return -ENODEV;
@@ -765,6 +780,11 @@ static int xgene_enet_phy_connect(struct net_device *ndev)
pdata->phy_dev = phy_dev;
} else {
+#ifdef CONFIG_ACPI
+ struct acpi_device *adev = acpi_phy_find_device(dev);
+ if (adev)
+ pdata->phy_dev = adev->driver_data;
+
phy_dev = pdata->phy_dev;
if (!phy_dev ||
@@ -773,6 +793,9 @@ static int xgene_enet_phy_connect(struct net_device *ndev)
netdev_err(ndev, "Could not connect to PHY\n");
return -ENODEV;
}
+#else
+ return -ENODEV;
+#endif
}
pdata->phy_speed = SPEED_UNKNOWN;
@@ -792,8 +815,8 @@ static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata,
struct phy_device *phy;
struct device_node *child_np;
struct device_node *mdio_np = NULL;
+ u32 phy_addr;
int ret;
- u32 phy_id;
if (dev->of_node) {
for_each_child_of_node(dev->of_node, child_np) {
@@ -820,21 +843,17 @@ static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata,
if (ret)
return ret;
- ret = device_property_read_u32(dev, "phy-channel", &phy_id);
+ ret = device_property_read_u32(dev, "phy-channel", &phy_addr);
if (ret)
- ret = device_property_read_u32(dev, "phy-addr", &phy_id);
+ ret = device_property_read_u32(dev, "phy-addr", &phy_addr);
if (ret)
return -EINVAL;
- phy = get_phy_device(mdio, phy_id, false);
- if (!phy || IS_ERR(phy))
+ phy = xgene_enet_phy_register(mdio, phy_addr);
+ if (!phy)
return -EIO;
- ret = phy_device_register(phy);
- if (ret)
- phy_device_free(phy);
- else
- pdata->phy_dev = phy;
+ pdata->phy_dev = phy;
return ret;
}
@@ -850,13 +869,13 @@ int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
return -ENOMEM;
mdio_bus->name = "APM X-Gene MDIO bus";
- mdio_bus->read = xgene_enet_mdio_read;
- mdio_bus->write = xgene_enet_mdio_write;
+ mdio_bus->read = xgene_mdio_rgmii_read;
+ mdio_bus->write = xgene_mdio_rgmii_write;
snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "xgene-mii",
ndev->name);
- mdio_bus->priv = pdata;
- mdio_bus->parent = &ndev->dev;
+ mdio_bus->priv = (void __force *)pdata->mcx_mac_addr;
+ mdio_bus->parent = &pdata->pdev->dev;
ret = xgene_mdiobus_register(pdata, mdio_bus);
if (ret) {
@@ -873,6 +892,12 @@ int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
return ret;
}
+void xgene_enet_phy_disconnect(struct xgene_enet_pdata *pdata)
+{
+ if (pdata->phy_dev)
+ phy_disconnect(pdata->phy_dev);
+}
+
void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata)
{
if (pdata->phy_dev)
@@ -890,11 +915,13 @@ const struct xgene_mac_ops xgene_gmac_ops = {
.tx_enable = xgene_gmac_tx_enable,
.rx_disable = xgene_gmac_rx_disable,
.tx_disable = xgene_gmac_tx_disable,
+ .set_speed = xgene_gmac_set_speed,
.set_mac_addr = xgene_gmac_set_mac_addr,
};
const struct xgene_port_ops xgene_gport_ops = {
.reset = xgene_enet_reset,
+ .clear = xgene_enet_clear,
.cle_bypass = xgene_enet_cle_bypass,
.shutdown = xgene_gport_shutdown,
};
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index 45220be3122f..179a44dceb29 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -104,6 +104,8 @@ enum xgene_enet_rm {
#define RECOMBBUF BIT(27)
#define MAC_OFFSET 0x30
+#define OFFSET_4 0x04
+#define OFFSET_8 0x08
#define BLOCK_ETH_CSR_OFFSET 0x2000
#define BLOCK_ETH_CLE_CSR_OFFSET 0x6000
@@ -165,6 +167,8 @@ enum xgene_enet_rm {
#define TX_DV_GATE_EN0 BIT(2)
#define RX_DV_GATE_EN0 BIT(1)
#define RESUME_RX0 BIT(0)
+#define ENET_CFGSSQMIFPRESET_ADDR 0x14
+#define ENET_CFGSSQMIWQRESET_ADDR 0x1c
#define ENET_CFGSSQMIWQASSOC_ADDR 0xe0
#define ENET_CFGSSQMIFPQASSOC_ADDR 0xdc
#define ENET_CFGSSQMIQMLITEFPQASSOC_ADDR 0xf0
@@ -297,11 +301,6 @@ enum xgene_enet_ring_bufnum {
RING_BUFNUM_INVALID
};
-enum xgene_enet_cmd {
- XGENE_ENET_WR_CMD = BIT(31),
- XGENE_ENET_RD_CMD = BIT(30)
-};
-
enum xgene_enet_err_code {
HBF_READ_DATA = 3,
HBF_LL_READ = 4,
@@ -347,6 +346,8 @@ void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata);
void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata);
bool xgene_ring_mgr_init(struct xgene_enet_pdata *p);
+int xgene_enet_phy_connect(struct net_device *ndev);
+void xgene_enet_phy_disconnect(struct xgene_enet_pdata *pdata);
extern const struct xgene_mac_ops xgene_gmac_ops;
extern const struct xgene_port_ops xgene_gport_ops;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index fd200883d228..d1d6b5eeb613 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -102,25 +102,13 @@ static u8 xgene_enet_hdr_len(const void *data)
static void xgene_enet_delete_bufpool(struct xgene_enet_desc_ring *buf_pool)
{
- struct xgene_enet_pdata *pdata = netdev_priv(buf_pool->ndev);
- struct xgene_enet_raw_desc16 *raw_desc;
- u32 slots = buf_pool->slots - 1;
- u32 tail = buf_pool->tail;
- u32 userinfo;
- int i, len;
-
- len = pdata->ring_ops->len(buf_pool);
- for (i = 0; i < len; i++) {
- tail = (tail - 1) & slots;
- raw_desc = &buf_pool->raw_desc16[tail];
+ int i;
- /* Hardware stores descriptor in little endian format */
- userinfo = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
- dev_kfree_skb_any(buf_pool->rx_skb[userinfo]);
+ /* Free up the buffers held by hardware */
+ for (i = 0; i < buf_pool->slots; i++) {
+ if (buf_pool->rx_skb[i])
+ dev_kfree_skb_any(buf_pool->rx_skb[i]);
}
-
- pdata->ring_ops->wr_cmd(buf_pool, -len);
- buf_pool->tail = tail;
}
static irqreturn_t xgene_enet_rx_irq(const int irq, void *data)
@@ -481,6 +469,7 @@ static int xgene_enet_rx_frame(struct xgene_enet_desc_ring *rx_ring,
XGENE_ENET_MAX_MTU, DMA_FROM_DEVICE);
skb_index = GET_VAL(USERINFO, le64_to_cpu(raw_desc->m0));
skb = buf_pool->rx_skb[skb_index];
+ buf_pool->rx_skb[skb_index] = NULL;
/* checking for error */
status = (GET_VAL(ELERR, le64_to_cpu(raw_desc->m0)) << LERR_LEN) ||
@@ -619,6 +608,30 @@ static void xgene_enet_timeout(struct net_device *ndev)
}
}
+static void xgene_enet_set_irq_name(struct net_device *ndev)
+{
+ struct xgene_enet_pdata *pdata = netdev_priv(ndev);
+ struct xgene_enet_desc_ring *ring;
+ int i;
+
+ for (i = 0; i < pdata->rxq_cnt; i++) {
+ ring = pdata->rx_ring[i];
+ if (!pdata->cq_cnt) {
+ snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
+ ndev->name);
+ } else {
+ snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-rx-%d",
+ ndev->name, i);
+ }
+ }
+
+ for (i = 0; i < pdata->cq_cnt; i++) {
+ ring = pdata->tx_ring[i]->cp_ring;
+ snprintf(ring->irq_name, IRQ_ID_SIZE, "%s-txc-%d",
+ ndev->name, i);
+ }
+}
+
static int xgene_enet_register_irq(struct net_device *ndev)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
@@ -626,6 +639,7 @@ static int xgene_enet_register_irq(struct net_device *ndev)
struct xgene_enet_desc_ring *ring;
int ret = 0, i;
+ xgene_enet_set_irq_name(ndev);
for (i = 0; i < pdata->rxq_cnt; i++) {
ring = pdata->rx_ring[i];
irq_set_status_flags(ring->irq, IRQ_DISABLE_UNLAZY);
@@ -720,20 +734,21 @@ static int xgene_enet_open(struct net_device *ndev)
if (ret)
return ret;
- mac_ops->tx_enable(pdata);
- mac_ops->rx_enable(pdata);
-
xgene_enet_napi_enable(pdata);
ret = xgene_enet_register_irq(ndev);
if (ret)
return ret;
- if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
+ if (pdata->phy_dev) {
phy_start(pdata->phy_dev);
- else
+ } else {
schedule_delayed_work(&pdata->link_work, PHY_POLL_LINK_OFF);
+ netif_carrier_off(ndev);
+ }
- netif_start_queue(ndev);
+ mac_ops->tx_enable(pdata);
+ mac_ops->rx_enable(pdata);
+ netif_tx_start_all_queues(ndev);
return ret;
}
@@ -744,16 +759,15 @@ static int xgene_enet_close(struct net_device *ndev)
const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
int i;
- netif_stop_queue(ndev);
+ netif_tx_stop_all_queues(ndev);
+ mac_ops->tx_disable(pdata);
+ mac_ops->rx_disable(pdata);
- if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
+ if (pdata->phy_dev)
phy_stop(pdata->phy_dev);
else
cancel_delayed_work_sync(&pdata->link_work);
- mac_ops->tx_disable(pdata);
- mac_ops->rx_disable(pdata);
-
xgene_enet_free_irq(ndev);
xgene_enet_napi_disable(pdata);
for (i = 0; i < pdata->rxq_cnt; i++)
@@ -761,7 +775,6 @@ static int xgene_enet_close(struct net_device *ndev)
return 0;
}
-
static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
{
struct xgene_enet_pdata *pdata;
@@ -771,7 +784,7 @@ static void xgene_enet_delete_ring(struct xgene_enet_desc_ring *ring)
dev = ndev_to_dev(ring->ndev);
pdata->ring_ops->clear(ring);
- dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
+ dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}
static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
@@ -784,6 +797,9 @@ static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
ring = pdata->tx_ring[i];
if (ring) {
xgene_enet_delete_ring(ring);
+ pdata->port_ops->clear(pdata, ring);
+ if (pdata->cq_cnt)
+ xgene_enet_delete_ring(ring->cp_ring);
pdata->tx_ring[i] = NULL;
}
}
@@ -794,6 +810,7 @@ static void xgene_enet_delete_desc_rings(struct xgene_enet_pdata *pdata)
buf_pool = ring->buf_pool;
xgene_enet_delete_bufpool(buf_pool);
xgene_enet_delete_ring(buf_pool);
+ pdata->port_ops->clear(pdata, buf_pool);
xgene_enet_delete_ring(ring);
pdata->rx_ring[i] = NULL;
}
@@ -842,7 +859,7 @@ static void xgene_enet_free_desc_ring(struct xgene_enet_desc_ring *ring)
if (ring->desc_addr) {
pdata->ring_ops->clear(ring);
- dma_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
+ dmam_free_coherent(dev, ring->size, ring->desc_addr, ring->dma);
}
devm_kfree(dev, ring);
}
@@ -900,9 +917,10 @@ static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
struct net_device *ndev, u32 ring_num,
enum xgene_enet_ring_cfgsize cfgsize, u32 ring_id)
{
- struct xgene_enet_desc_ring *ring;
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
struct device *dev = ndev_to_dev(ndev);
+ struct xgene_enet_desc_ring *ring;
+ void *irq_mbox_addr;
int size;
size = xgene_enet_get_ring_size(dev, cfgsize);
@@ -919,8 +937,8 @@ static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
ring->cfgsize = cfgsize;
ring->id = ring_id;
- ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma,
- GFP_KERNEL);
+ ring->desc_addr = dmam_alloc_coherent(dev, size, &ring->dma,
+ GFP_KERNEL | __GFP_ZERO);
if (!ring->desc_addr) {
devm_kfree(dev, ring);
return NULL;
@@ -928,14 +946,16 @@ static struct xgene_enet_desc_ring *xgene_enet_create_desc_ring(
ring->size = size;
if (is_irq_mbox_required(pdata, ring)) {
- ring->irq_mbox_addr = dma_zalloc_coherent(dev, INTR_MBOX_SIZE,
- &ring->irq_mbox_dma, GFP_KERNEL);
- if (!ring->irq_mbox_addr) {
- dma_free_coherent(dev, size, ring->desc_addr,
- ring->dma);
+ irq_mbox_addr = dmam_alloc_coherent(dev, INTR_MBOX_SIZE,
+ &ring->irq_mbox_dma,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!irq_mbox_addr) {
+ dmam_free_coherent(dev, size, ring->desc_addr,
+ ring->dma);
devm_kfree(dev, ring);
return NULL;
}
+ ring->irq_mbox_addr = irq_mbox_addr;
}
ring->cmd_base = xgene_enet_ring_cmd_base(pdata, ring);
@@ -973,6 +993,17 @@ static enum xgene_ring_owner xgene_derive_ring_owner(struct xgene_enet_pdata *p)
return owner;
}
+static u8 xgene_start_cpu_bufnum(struct xgene_enet_pdata *pdata)
+{
+ struct device *dev = &pdata->pdev->dev;
+ u32 cpu_bufnum;
+ int ret;
+
+ ret = device_property_read_u32(dev, "channel", &cpu_bufnum);
+
+ return (!ret) ? cpu_bufnum : pdata->cpu_bufnum;
+}
+
static int xgene_enet_create_desc_rings(struct net_device *ndev)
{
struct xgene_enet_pdata *pdata = netdev_priv(ndev);
@@ -981,13 +1012,16 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
struct xgene_enet_desc_ring *buf_pool = NULL;
enum xgene_ring_owner owner;
dma_addr_t dma_exp_bufs;
- u8 cpu_bufnum = pdata->cpu_bufnum;
+ u8 cpu_bufnum;
u8 eth_bufnum = pdata->eth_bufnum;
u8 bp_bufnum = pdata->bp_bufnum;
u16 ring_num = pdata->ring_num;
+ __le64 *exp_bufs;
u16 ring_id;
int i, ret, size;
+ cpu_bufnum = xgene_start_cpu_bufnum(pdata);
+
for (i = 0; i < pdata->rxq_cnt; i++) {
/* allocate rx descriptor ring */
owner = xgene_derive_ring_owner(pdata);
@@ -1014,13 +1048,6 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
rx_ring->nbufpool = NUM_BUFPOOL;
rx_ring->buf_pool = buf_pool;
rx_ring->irq = pdata->irqs[i];
- if (!pdata->cq_cnt) {
- snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx-txc",
- ndev->name);
- } else {
- snprintf(rx_ring->irq_name, IRQ_ID_SIZE, "%s-rx%d",
- ndev->name, i);
- }
buf_pool->rx_skb = devm_kcalloc(dev, buf_pool->slots,
sizeof(struct sk_buff *),
GFP_KERNEL);
@@ -1047,13 +1074,13 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
}
size = (tx_ring->slots / 2) * sizeof(__le64) * MAX_EXP_BUFFS;
- tx_ring->exp_bufs = dma_zalloc_coherent(dev, size,
- &dma_exp_bufs,
- GFP_KERNEL);
- if (!tx_ring->exp_bufs) {
+ exp_bufs = dmam_alloc_coherent(dev, size, &dma_exp_bufs,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!exp_bufs) {
ret = -ENOMEM;
goto err;
}
+ tx_ring->exp_bufs = exp_bufs;
pdata->tx_ring[i] = tx_ring;
@@ -1073,8 +1100,6 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
cp_ring->irq = pdata->irqs[pdata->rxq_cnt + i];
cp_ring->index = i;
- snprintf(cp_ring->irq_name, IRQ_ID_SIZE, "%s-txc%d",
- ndev->name, i);
}
cp_ring->cp_skb = devm_kcalloc(dev, tx_ring->slots,
@@ -1270,6 +1295,23 @@ static int xgene_enet_get_irqs(struct xgene_enet_pdata *pdata)
return 0;
}
+static int xgene_enet_check_phy_handle(struct xgene_enet_pdata *pdata)
+{
+ int ret;
+
+ if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII)
+ return 0;
+
+ if (!IS_ENABLED(CONFIG_MDIO_XGENE))
+ return 0;
+
+ ret = xgene_enet_phy_connect(pdata->ndev);
+ if (!ret)
+ pdata->mdio_driver = true;
+
+ return 0;
+}
+
static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
{
struct platform_device *pdev;
@@ -1355,6 +1397,10 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
if (ret)
return ret;
+ ret = xgene_enet_check_phy_handle(pdata);
+ if (ret)
+ return ret;
+
pdata->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(pdata->clk)) {
/* Firmware may have set up the clock already. */
@@ -1434,6 +1480,7 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
pdata->port_ops->cle_bypass(pdata, dst_ring_num, buf_pool->id);
}
+ pdata->phy_speed = SPEED_UNKNOWN;
pdata->mac_ops->init(pdata);
return ret;
@@ -1543,28 +1590,12 @@ static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
}
}
-static void xgene_enet_napi_del(struct xgene_enet_pdata *pdata)
-{
- struct napi_struct *napi;
- int i;
-
- for (i = 0; i < pdata->rxq_cnt; i++) {
- napi = &pdata->rx_ring[i]->napi;
- netif_napi_del(napi);
- }
-
- for (i = 0; i < pdata->cq_cnt; i++) {
- napi = &pdata->tx_ring[i]->cp_ring->napi;
- netif_napi_del(napi);
- }
-}
-
static int xgene_enet_probe(struct platform_device *pdev)
{
struct net_device *ndev;
struct xgene_enet_pdata *pdata;
struct device *dev = &pdev->dev;
- const struct xgene_mac_ops *mac_ops;
+ void (*link_state)(struct work_struct *);
const struct of_device_id *of_id;
int ret;
@@ -1622,27 +1653,31 @@ static int xgene_enet_probe(struct platform_device *pdev)
goto err;
}
- ret = register_netdev(ndev);
- if (ret) {
- netdev_err(ndev, "Failed to register netdev\n");
- goto err;
- }
-
ret = xgene_enet_init_hw(pdata);
if (ret)
goto err_netdev;
- mac_ops = pdata->mac_ops;
- if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII) {
- ret = xgene_enet_mdio_config(pdata);
- if (ret)
- goto err_netdev;
- } else {
- INIT_DELAYED_WORK(&pdata->link_work, mac_ops->link_state);
+ link_state = pdata->mac_ops->link_state;
+ if (pdata->phy_mode == PHY_INTERFACE_MODE_XGMII) {
+ INIT_DELAYED_WORK(&pdata->link_work, link_state);
+ } else if (!pdata->mdio_driver) {
+ if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
+ ret = xgene_enet_mdio_config(pdata);
+ else
+ INIT_DELAYED_WORK(&pdata->link_work, link_state);
}
+ if (ret)
+ goto err;
xgene_enet_napi_add(pdata);
+ ret = register_netdev(ndev);
+ if (ret) {
+ netdev_err(ndev, "Failed to register netdev\n");
+ goto err;
+ }
+
return 0;
+
err_netdev:
unregister_netdev(ndev);
err:
@@ -1660,20 +1695,38 @@ static int xgene_enet_remove(struct platform_device *pdev)
mac_ops = pdata->mac_ops;
ndev = pdata->ndev;
- mac_ops->rx_disable(pdata);
- mac_ops->tx_disable(pdata);
+ rtnl_lock();
+ if (netif_running(ndev))
+ dev_close(ndev);
+ rtnl_unlock();
- xgene_enet_napi_del(pdata);
- if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
+ if (pdata->mdio_driver)
+ xgene_enet_phy_disconnect(pdata);
+ else if (pdata->phy_mode == PHY_INTERFACE_MODE_RGMII)
xgene_enet_mdio_remove(pdata);
+
unregister_netdev(ndev);
- xgene_enet_delete_desc_rings(pdata);
pdata->port_ops->shutdown(pdata);
+ xgene_enet_delete_desc_rings(pdata);
free_netdev(ndev);
return 0;
}
+static void xgene_enet_shutdown(struct platform_device *pdev)
+{
+ struct xgene_enet_pdata *pdata;
+
+ pdata = platform_get_drvdata(pdev);
+ if (!pdata)
+ return;
+
+ if (!pdata->ndev)
+ return;
+
+ xgene_enet_remove(pdev);
+}
+
#ifdef CONFIG_ACPI
static const struct acpi_device_id xgene_enet_acpi_match[] = {
{ "APMC0D05", XGENE_ENET1},
@@ -1708,6 +1761,7 @@ static struct platform_driver xgene_enet_driver = {
},
.probe = xgene_enet_probe,
.remove = xgene_enet_remove,
+ .shutdown = xgene_enet_shutdown,
};
module_platform_driver(xgene_enet_driver);
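/*
 * Illustrative sketch (not part of the patch): moving the descriptor
 * rings from dma_zalloc_coherent() to the managed DMA API ties each
 * allocation to the device's lifetime.  Error paths may still release
 * early with dmam_free_coherent(), but anything left outstanding is
 * reclaimed by devres when the device is unbound.
 */
static void *example_alloc_ring(struct device *dev, size_t size,
				dma_addr_t *dma)
{
	/* zeroed, devres-tracked coherent buffer */
	return dmam_alloc_coherent(dev, size, dma,
				   GFP_KERNEL | __GFP_ZERO);
}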
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 9d9cf445148c..217546e5714a 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -38,6 +38,7 @@
#include "xgene_enet_hw.h"
#include "xgene_enet_cle.h"
#include "xgene_enet_ring2.h"
+#include "../../../phy/mdio-xgene.h"
#define XGENE_DRV_VERSION "v1.0"
#define XGENE_ENET_MAX_MTU 1536
@@ -140,6 +141,7 @@ struct xgene_mac_ops {
void (*rx_enable)(struct xgene_enet_pdata *pdata);
void (*tx_disable)(struct xgene_enet_pdata *pdata);
void (*rx_disable)(struct xgene_enet_pdata *pdata);
+ void (*set_speed)(struct xgene_enet_pdata *pdata);
void (*set_mac_addr)(struct xgene_enet_pdata *pdata);
void (*set_mss)(struct xgene_enet_pdata *pdata);
void (*link_state)(struct work_struct *work);
@@ -147,6 +149,8 @@ struct xgene_mac_ops {
struct xgene_port_ops {
int (*reset)(struct xgene_enet_pdata *pdata);
+ void (*clear)(struct xgene_enet_pdata *pdata,
+ struct xgene_enet_desc_ring *ring);
void (*cle_bypass)(struct xgene_enet_pdata *pdata,
u32 dst_ring_num, u16 bufpool_id);
void (*shutdown)(struct xgene_enet_pdata *pdata);
@@ -201,7 +205,7 @@ struct xgene_enet_pdata {
const struct xgene_mac_ops *mac_ops;
const struct xgene_port_ops *port_ops;
struct xgene_ring_ops *ring_ops;
- struct xgene_cle_ops *cle_ops;
+ const struct xgene_cle_ops *cle_ops;
struct delayed_work link_work;
u32 port_id;
u8 cpu_bufnum;
@@ -211,6 +215,7 @@ struct xgene_enet_pdata {
u32 mss;
u8 tx_delay;
u8 rx_delay;
+ bool mdio_driver;
};
struct xgene_indirect_ctl {
@@ -220,34 +225,6 @@ struct xgene_indirect_ctl {
void __iomem *cmd_done;
};
-/* Set the specified value into a bit-field defined by its starting position
- * and length within a single u64.
- */
-static inline u64 xgene_enet_set_field_value(int pos, int len, u64 val)
-{
- return (val & ((1ULL << len) - 1)) << pos;
-}
-
-#define SET_VAL(field, val) \
- xgene_enet_set_field_value(field ## _POS, field ## _LEN, val)
-
-#define SET_BIT(field) \
- xgene_enet_set_field_value(field ## _POS, 1, 1)
-
-/* Get the value from a bit-field defined by its starting position
- * and length within the specified u64.
- */
-static inline u64 xgene_enet_get_field_value(int pos, int len, u64 src)
-{
- return (src >> pos) & ((1ULL << len) - 1);
-}
-
-#define GET_VAL(field, src) \
- xgene_enet_get_field_value(field ## _POS, field ## _LEN, src)
-
-#define GET_BIT(field, src) \
- xgene_enet_get_field_value(field ## _POS, 1, src)
-
static inline struct device *ndev_to_dev(struct net_device *ndev)
{
return ndev->dev.parent;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
index 78475512b683..d12e9cbae820 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
@@ -28,6 +28,12 @@ static void xgene_enet_wr_csr(struct xgene_enet_pdata *p, u32 offset, u32 val)
iowrite32(val, p->eth_csr_addr + offset);
}
+static void xgene_enet_wr_clkrst_csr(struct xgene_enet_pdata *p, u32 offset,
+ u32 val)
+{
+ iowrite32(val, p->base_addr + offset);
+}
+
static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *p,
u32 offset, u32 val)
{
@@ -93,6 +99,11 @@ static u32 xgene_enet_rd_diag_csr(struct xgene_enet_pdata *p, u32 offset)
return ioread32(p->eth_diag_csr_addr + offset);
}
+static u32 xgene_enet_rd_mcx_csr(struct xgene_enet_pdata *p, u32 offset)
+{
+ return ioread32(p->mcx_mac_csr_addr + offset);
+}
+
static u32 xgene_enet_rd_indirect(struct xgene_indirect_ctl *ctl, u32 rd_addr)
{
u32 rd_data;
@@ -132,9 +143,17 @@ static u32 xgene_enet_rd_mac(struct xgene_enet_pdata *p, u32 rd_addr)
static int xgene_enet_ecc_init(struct xgene_enet_pdata *p)
{
struct net_device *ndev = p->ndev;
- u32 data;
+ u32 data, shutdown;
int i = 0;
+ shutdown = xgene_enet_rd_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR);
+ data = xgene_enet_rd_diag_csr(p, ENET_BLOCK_MEM_RDY_ADDR);
+
+ if (!shutdown && data == ~0U) {
+ netdev_dbg(ndev, "+ ecc_init done, skipping\n");
+ return 0;
+ }
+
xgene_enet_wr_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0);
do {
usleep_range(100, 110);
@@ -230,21 +249,105 @@ static u32 xgene_enet_link_status(struct xgene_enet_pdata *p)
data = xgene_mii_phy_read(p, INT_PHY_ADDR,
SGMII_BASE_PAGE_ABILITY_ADDR >> 2);
+ if (LINK_SPEED(data) == PHY_SPEED_1000)
+ p->phy_speed = SPEED_1000;
+ else if (LINK_SPEED(data) == PHY_SPEED_100)
+ p->phy_speed = SPEED_100;
+ else
+ p->phy_speed = SPEED_10;
+
return data & LINK_UP;
}
-static void xgene_sgmac_init(struct xgene_enet_pdata *p)
+static void xgene_sgmii_configure(struct xgene_enet_pdata *p)
{
- u32 data, loop = 10;
- u32 offset = p->port_id * 4;
- u32 enet_spare_cfg_reg, rsif_config_reg;
- u32 cfg_bypass_reg, rx_dv_gate_reg;
-
- xgene_sgmac_reset(p);
+ xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2,
+ 0x8000);
+ xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_CONTROL_ADDR >> 2, 0x9000);
+ xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2, 0);
+}
- /* Enable auto-negotiation */
- xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_CONTROL_ADDR >> 2, 0x1000);
+static void xgene_sgmii_tbi_control_reset(struct xgene_enet_pdata *p)
+{
+ xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2,
+ 0x8000);
xgene_mii_phy_write(p, INT_PHY_ADDR, SGMII_TBI_CONTROL_ADDR >> 2, 0);
+}
+
+static void xgene_sgmii_reset(struct xgene_enet_pdata *p)
+{
+ u32 value;
+
+ if (p->phy_speed == SPEED_UNKNOWN)
+ return;
+
+ value = xgene_mii_phy_read(p, INT_PHY_ADDR,
+ SGMII_BASE_PAGE_ABILITY_ADDR >> 2);
+ if (!(value & LINK_UP))
+ xgene_sgmii_tbi_control_reset(p);
+}
+
+static void xgene_sgmac_set_speed(struct xgene_enet_pdata *p)
+{
+ u32 icm0_addr, icm2_addr, debug_addr;
+ u32 icm0, icm2, intf_ctl;
+ u32 mc2, value;
+
+ xgene_sgmii_reset(p);
+
+ if (p->enet_id == XGENE_ENET1) {
+ icm0_addr = ICM_CONFIG0_REG_0_ADDR + p->port_id * OFFSET_8;
+ icm2_addr = ICM_CONFIG2_REG_0_ADDR + p->port_id * OFFSET_4;
+ debug_addr = DEBUG_REG_ADDR;
+ } else {
+ icm0_addr = XG_MCX_ICM_CONFIG0_REG_0_ADDR;
+ icm2_addr = XG_MCX_ICM_CONFIG2_REG_0_ADDR;
+ debug_addr = XG_DEBUG_REG_ADDR;
+ }
+
+ icm0 = xgene_enet_rd_mcx_csr(p, icm0_addr);
+ icm2 = xgene_enet_rd_mcx_csr(p, icm2_addr);
+ mc2 = xgene_enet_rd_mac(p, MAC_CONFIG_2_ADDR);
+ intf_ctl = xgene_enet_rd_mac(p, INTERFACE_CONTROL_ADDR);
+
+ switch (p->phy_speed) {
+ case SPEED_10:
+ ENET_INTERFACE_MODE2_SET(&mc2, 1);
+ intf_ctl &= ~(ENET_LHD_MODE | ENET_GHD_MODE);
+ CFG_MACMODE_SET(&icm0, 0);
+ CFG_WAITASYNCRD_SET(&icm2, 500);
+ break;
+ case SPEED_100:
+ ENET_INTERFACE_MODE2_SET(&mc2, 1);
+ intf_ctl &= ~ENET_GHD_MODE;
+ intf_ctl |= ENET_LHD_MODE;
+ CFG_MACMODE_SET(&icm0, 1);
+ CFG_WAITASYNCRD_SET(&icm2, 80);
+ break;
+ default:
+ ENET_INTERFACE_MODE2_SET(&mc2, 2);
+ intf_ctl &= ~ENET_LHD_MODE;
+ intf_ctl |= ENET_GHD_MODE;
+ CFG_MACMODE_SET(&icm0, 2);
+ CFG_WAITASYNCRD_SET(&icm2, 16);
+ value = xgene_enet_rd_csr(p, debug_addr);
+ value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
+ xgene_enet_wr_csr(p, debug_addr, value);
+ break;
+ }
+
+ mc2 |= FULL_DUPLEX2 | PAD_CRC;
+ xgene_enet_wr_mac(p, MAC_CONFIG_2_ADDR, mc2);
+ xgene_enet_wr_mac(p, INTERFACE_CONTROL_ADDR, intf_ctl);
+ xgene_enet_wr_mcx_csr(p, icm0_addr, icm0);
+ xgene_enet_wr_mcx_csr(p, icm2_addr, icm2);
+}
+
+static void xgene_sgmii_enable_autoneg(struct xgene_enet_pdata *p)
+{
+ u32 data, loop = 10;
+
+ xgene_sgmii_configure(p);
while (loop--) {
data = xgene_mii_phy_read(p, INT_PHY_ADDR,
@@ -255,17 +358,27 @@ static void xgene_sgmac_init(struct xgene_enet_pdata *p)
}
if (!(data & AUTO_NEG_COMPLETE) || !(data & LINK_STATUS))
netdev_err(p->ndev, "Auto-negotiation failed\n");
+}
+
+static void xgene_sgmac_init(struct xgene_enet_pdata *p)
+{
+ u32 enet_spare_cfg_reg, rsif_config_reg;
+ u32 cfg_bypass_reg, rx_dv_gate_reg;
+ u32 data, offset;
- data = xgene_enet_rd_mac(p, MAC_CONFIG_2_ADDR);
- ENET_INTERFACE_MODE2_SET(&data, 2);
- xgene_enet_wr_mac(p, MAC_CONFIG_2_ADDR, data | FULL_DUPLEX2);
- xgene_enet_wr_mac(p, INTERFACE_CONTROL_ADDR, ENET_GHD_MODE);
+ if (!(p->enet_id == XGENE_ENET2 && p->mdio_driver))
+ xgene_sgmac_reset(p);
+
+ xgene_sgmii_enable_autoneg(p);
+ xgene_sgmac_set_speed(p);
+ xgene_sgmac_set_mac_addr(p);
if (p->enet_id == XGENE_ENET1) {
enet_spare_cfg_reg = ENET_SPARE_CFG_REG_ADDR;
rsif_config_reg = RSIF_CONFIG_REG_ADDR;
cfg_bypass_reg = CFG_BYPASS_ADDR;
- rx_dv_gate_reg = SG_RX_DV_GATE_REG_0_ADDR;
+ offset = p->port_id * OFFSET_4;
+ rx_dv_gate_reg = SG_RX_DV_GATE_REG_0_ADDR + offset;
} else {
enet_spare_cfg_reg = XG_ENET_SPARE_CFG_REG_ADDR;
rsif_config_reg = XG_RSIF_CONFIG_REG_ADDR;
@@ -277,8 +390,6 @@ static void xgene_sgmac_init(struct xgene_enet_pdata *p)
data |= MPA_IDLE_WITH_QMI_EMPTY;
xgene_enet_wr_csr(p, enet_spare_cfg_reg, data);
- xgene_sgmac_set_mac_addr(p);
-
/* Adjust MDC clock frequency */
data = xgene_enet_rd_mac(p, MII_MGMT_CONFIG_ADDR);
MGMT_CLOCK_SEL_SET(&data, 7);
@@ -292,7 +403,7 @@ static void xgene_sgmac_init(struct xgene_enet_pdata *p)
/* Bypass traffic gating */
xgene_enet_wr_csr(p, XG_ENET_SPARE_CFG_REG_1_ADDR, 0x84);
xgene_enet_wr_csr(p, cfg_bypass_reg, RESUME_TX);
- xgene_enet_wr_mcx_csr(p, rx_dv_gate_reg + offset, RESUME_RX0);
+ xgene_enet_wr_mcx_csr(p, rx_dv_gate_reg, RESUME_RX0);
}
static void xgene_sgmac_rxtx(struct xgene_enet_pdata *p, u32 bits, bool set)
@@ -331,17 +442,43 @@ static void xgene_sgmac_tx_disable(struct xgene_enet_pdata *p)
static int xgene_enet_reset(struct xgene_enet_pdata *p)
{
+ struct device *dev = &p->pdev->dev;
+
if (!xgene_ring_mgr_init(p))
return -ENODEV;
- if (!IS_ERR(p->clk)) {
- clk_prepare_enable(p->clk);
- clk_disable_unprepare(p->clk);
- clk_prepare_enable(p->clk);
+ if (p->mdio_driver && p->enet_id == XGENE_ENET2) {
+ xgene_enet_config_ring_if_assoc(p);
+ return 0;
}
- xgene_enet_ecc_init(p);
- xgene_enet_config_ring_if_assoc(p);
+ if (p->enet_id == XGENE_ENET2)
+ xgene_enet_wr_clkrst_csr(p, XGENET_CONFIG_REG_ADDR, SGMII_EN);
+
+ if (dev->of_node) {
+ if (!IS_ERR(p->clk)) {
+ clk_prepare_enable(p->clk);
+ udelay(5);
+ clk_disable_unprepare(p->clk);
+ udelay(5);
+ clk_prepare_enable(p->clk);
+ udelay(5);
+ }
+ } else {
+#ifdef CONFIG_ACPI
+ if (acpi_has_method(ACPI_HANDLE(&p->pdev->dev), "_RST"))
+ acpi_evaluate_object(ACPI_HANDLE(&p->pdev->dev),
+ "_RST", NULL, NULL);
+ else if (acpi_has_method(ACPI_HANDLE(&p->pdev->dev), "_INI"))
+ acpi_evaluate_object(ACPI_HANDLE(&p->pdev->dev),
+ "_INI", NULL, NULL);
+#endif
+ }
+
+ if (!p->port_id) {
+ xgene_enet_ecc_init(p);
+ xgene_enet_config_ring_if_assoc(p);
+ }
return 0;
}
@@ -369,10 +506,53 @@ static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
xgene_enet_wr_csr(p, cle_bypass_reg1 + offset, data);
}
+static void xgene_enet_clear(struct xgene_enet_pdata *pdata,
+ struct xgene_enet_desc_ring *ring)
+{
+ u32 addr, val, data;
+
+ val = xgene_enet_ring_bufnum(ring->id);
+
+ if (xgene_enet_is_bufpool(ring->id)) {
+ addr = ENET_CFGSSQMIFPRESET_ADDR;
+ data = BIT(val - 0x20);
+ } else {
+ addr = ENET_CFGSSQMIWQRESET_ADDR;
+ data = BIT(val);
+ }
+
+ xgene_enet_wr_ring_if(pdata, addr, data);
+}
+
static void xgene_enet_shutdown(struct xgene_enet_pdata *p)
{
- if (!IS_ERR(p->clk))
- clk_disable_unprepare(p->clk);
+ struct device *dev = &p->pdev->dev;
+ struct xgene_enet_desc_ring *ring;
+ u32 pb, val;
+ int i;
+
+ pb = 0;
+ for (i = 0; i < p->rxq_cnt; i++) {
+ ring = p->rx_ring[i]->buf_pool;
+
+ val = xgene_enet_ring_bufnum(ring->id);
+ pb |= BIT(val - 0x20);
+ }
+ xgene_enet_wr_ring_if(p, ENET_CFGSSQMIFPRESET_ADDR, pb);
+
+ pb = 0;
+ for (i = 0; i < p->txq_cnt; i++) {
+ ring = p->tx_ring[i];
+
+ val = xgene_enet_ring_bufnum(ring->id);
+ pb |= BIT(val);
+ }
+ xgene_enet_wr_ring_if(p, ENET_CFGSSQMIWQRESET_ADDR, pb);
+
+ if (dev->of_node) {
+ if (!IS_ERR(p->clk))
+ clk_disable_unprepare(p->clk);
+ }
}
static void xgene_enet_link_state(struct work_struct *work)
@@ -386,10 +566,11 @@ static void xgene_enet_link_state(struct work_struct *work)
if (link) {
if (!netif_carrier_ok(ndev)) {
netif_carrier_on(ndev);
- xgene_sgmac_init(p);
+ xgene_sgmac_set_speed(p);
xgene_sgmac_rx_enable(p);
xgene_sgmac_tx_enable(p);
- netdev_info(ndev, "Link is Up - 1Gbps\n");
+ netdev_info(ndev, "Link is Up - %dMbps\n",
+ p->phy_speed);
}
poll_interval = PHY_POLL_LINK_ON;
} else {
@@ -412,12 +593,14 @@ const struct xgene_mac_ops xgene_sgmac_ops = {
.tx_enable = xgene_sgmac_tx_enable,
.rx_disable = xgene_sgmac_rx_disable,
.tx_disable = xgene_sgmac_tx_disable,
+ .set_speed = xgene_sgmac_set_speed,
.set_mac_addr = xgene_sgmac_set_mac_addr,
.link_state = xgene_enet_link_state
};
const struct xgene_port_ops xgene_sgport_ops = {
.reset = xgene_enet_reset,
+ .clear = xgene_enet_clear,
.cle_bypass = xgene_enet_cle_bypass,
.shutdown = xgene_enet_shutdown
};
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h
index 002df5a6756e..3d0ba374491b 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.h
@@ -24,6 +24,7 @@
#define PHY_ADDR(src) (((src)<<8) & GENMASK(12, 8))
#define REG_ADDR(src) ((src) & GENMASK(4, 0))
#define PHY_CONTROL(src) ((src) & GENMASK(15, 0))
+#define LINK_SPEED(src) (((src) & GENMASK(11, 10)) >> 10)
#define INT_PHY_ADDR 0x1e
#define SGMII_TBI_CONTROL_ADDR 0x44
#define SGMII_CONTROL_ADDR 0x00
@@ -34,6 +35,13 @@
#define LINK_UP BIT(15)
#define MPA_IDLE_WITH_QMI_EMPTY BIT(12)
#define SG_RX_DV_GATE_REG_0_ADDR 0x05fc
+#define SGMII_EN 0x1
+
+enum xgene_phy_speed {
+ PHY_SPEED_10,
+ PHY_SPEED_100,
+ PHY_SPEED_1000
+};
extern const struct xgene_mac_ops xgene_sgmac_ops;
extern const struct xgene_port_ops xgene_sgport_ops;
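
LINK_SPEED() isolates bits [11:10] of the SGMII status word, which the new enum names. A hedged decode sketch (the helper name is illustrative and not part of the patch):

static int example_speed_mbps(u32 status)
{
	switch (LINK_SPEED(status)) {
	case PHY_SPEED_10:
		return 10;
	case PHY_SPEED_100:
		return 100;
	case PHY_SPEED_1000:
	default:
		return 1000;
	}
}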
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index ba030dc1940b..9c6ad0dce00f 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -258,13 +258,29 @@ static void xgene_xgmac_tx_disable(struct xgene_enet_pdata *pdata)
static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
{
+ struct device *dev = &pdata->pdev->dev;
+
if (!xgene_ring_mgr_init(pdata))
return -ENODEV;
- if (!IS_ERR(pdata->clk)) {
+ if (dev->of_node) {
clk_prepare_enable(pdata->clk);
+ udelay(5);
clk_disable_unprepare(pdata->clk);
+ udelay(5);
clk_prepare_enable(pdata->clk);
+ udelay(5);
+ } else {
+#ifdef CONFIG_ACPI
+ if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev), "_RST")) {
+ acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
+ "_RST", NULL, NULL);
+ } else if (acpi_has_method(ACPI_HANDLE(&pdata->pdev->dev),
+ "_INI")) {
+ acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
+ "_INI", NULL, NULL);
+ }
+#endif
}
xgene_enet_ecc_init(pdata);
@@ -292,8 +308,51 @@ static void xgene_enet_xgcle_bypass(struct xgene_enet_pdata *pdata,
static void xgene_enet_shutdown(struct xgene_enet_pdata *pdata)
{
- if (!IS_ERR(pdata->clk))
- clk_disable_unprepare(pdata->clk);
+ struct device *dev = &pdata->pdev->dev;
+ struct xgene_enet_desc_ring *ring;
+ u32 pb, val;
+ int i;
+
+ pb = 0;
+ for (i = 0; i < pdata->rxq_cnt; i++) {
+ ring = pdata->rx_ring[i]->buf_pool;
+
+ val = xgene_enet_ring_bufnum(ring->id);
+ pb |= BIT(val - 0x20);
+ }
+ xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPRESET_ADDR, pb);
+
+ pb = 0;
+ for (i = 0; i < pdata->txq_cnt; i++) {
+ ring = pdata->tx_ring[i];
+
+ val = xgene_enet_ring_bufnum(ring->id);
+ pb |= BIT(val);
+ }
+ xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQRESET_ADDR, pb);
+
+ if (dev->of_node) {
+ if (!IS_ERR(pdata->clk))
+ clk_disable_unprepare(pdata->clk);
+ }
+}
+
+static void xgene_enet_clear(struct xgene_enet_pdata *pdata,
+ struct xgene_enet_desc_ring *ring)
+{
+ u32 addr, val, data;
+
+ val = xgene_enet_ring_bufnum(ring->id);
+
+ if (xgene_enet_is_bufpool(ring->id)) {
+ addr = ENET_CFGSSQMIFPRESET_ADDR;
+ data = BIT(val - 0x20);
+ } else {
+ addr = ENET_CFGSSQMIWQRESET_ADDR;
+ data = BIT(val);
+ }
+
+ xgene_enet_wr_ring_if(pdata, addr, data);
}
static void xgene_enet_link_state(struct work_struct *work)
@@ -340,6 +399,7 @@ const struct xgene_mac_ops xgene_xgmac_ops = {
const struct xgene_port_ops xgene_xgport_ops = {
.reset = xgene_enet_reset,
+ .clear = xgene_enet_clear,
.cle_bypass = xgene_enet_xgcle_bypass,
.shutdown = xgene_enet_shutdown,
};
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
index 0a2dca8a1725..f1ea485f916b 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.h
@@ -65,9 +65,12 @@
#define XG_CFG_LINK_AGGR_RESUME_0_ADDR 0x0214
#define XG_LINK_STATUS_ADDR 0x0228
#define XG_TSIF_MSS_REG0_ADDR 0x02a4
+#define XG_DEBUG_REG_ADDR 0x0400
#define XG_ENET_SPARE_CFG_REG_ADDR 0x040c
#define XG_ENET_SPARE_CFG_REG_1_ADDR 0x0410
#define XGENET_RX_DV_GATE_REG_0_ADDR 0x0804
+#define XG_MCX_ICM_CONFIG0_REG_0_ADDR 0x00e0
+#define XG_MCX_ICM_CONFIG2_REG_0_ADDR 0x00e8
extern const struct xgene_mac_ops xgene_xgmac_ops;
extern const struct xgene_port_ops xgene_xgport_ops;
diff --git a/drivers/net/ethernet/arc/emac.h b/drivers/net/ethernet/arc/emac.h
index ca562bc034c3..e4feb712d4f2 100644
--- a/drivers/net/ethernet/arc/emac.h
+++ b/drivers/net/ethernet/arc/emac.h
@@ -134,7 +134,6 @@ struct arc_emac_priv {
/* Devices */
struct device *dev;
- struct phy_device *phy_dev;
struct mii_bus *bus;
struct arc_emac_mdio_bus_data bus_data;
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index a3a9392a4954..b0da9693f28a 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -47,7 +47,7 @@ static inline int arc_emac_tx_avail(struct arc_emac_priv *priv)
static void arc_emac_adjust_link(struct net_device *ndev)
{
struct arc_emac_priv *priv = netdev_priv(ndev);
- struct phy_device *phy_dev = priv->phy_dev;
+ struct phy_device *phy_dev = ndev->phydev;
unsigned int reg, state_changed = 0;
if (priv->link != phy_dev->link) {
@@ -80,46 +80,6 @@ static void arc_emac_adjust_link(struct net_device *ndev)
}
/**
- * arc_emac_get_settings - Get PHY settings.
- * @ndev: Pointer to net_device structure.
- * @cmd: Pointer to ethtool_cmd structure.
- *
- * This implements ethtool command for getting PHY settings. If PHY could
- * not be found, the function returns -ENODEV. This function calls the
- * relevant PHY ethtool API to get the PHY settings.
- * Issue "ethtool ethX" under linux prompt to execute this function.
- */
-static int arc_emac_get_settings(struct net_device *ndev,
- struct ethtool_cmd *cmd)
-{
- struct arc_emac_priv *priv = netdev_priv(ndev);
-
- return phy_ethtool_gset(priv->phy_dev, cmd);
-}
-
-/**
- * arc_emac_set_settings - Set PHY settings as passed in the argument.
- * @ndev: Pointer to net_device structure.
- * @cmd: Pointer to ethtool_cmd structure.
- *
- * This implements ethtool command for setting various PHY settings. If PHY
- * could not be found, the function returns -ENODEV. This function calls the
- * relevant PHY ethtool API to set the PHY.
- * Issue e.g. "ethtool -s ethX speed 1000" under linux prompt to execute this
- * function.
- */
-static int arc_emac_set_settings(struct net_device *ndev,
- struct ethtool_cmd *cmd)
-{
- struct arc_emac_priv *priv = netdev_priv(ndev);
-
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- return phy_ethtool_sset(priv->phy_dev, cmd);
-}
-
-/**
* arc_emac_get_drvinfo - Get EMAC driver information.
* @ndev: Pointer to net_device structure.
* @info: Pointer to ethtool_drvinfo structure.
@@ -137,10 +97,10 @@ static void arc_emac_get_drvinfo(struct net_device *ndev,
}
static const struct ethtool_ops arc_emac_ethtool_ops = {
- .get_settings = arc_emac_get_settings,
- .set_settings = arc_emac_set_settings,
.get_drvinfo = arc_emac_get_drvinfo,
.get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
#define FIRST_OR_LAST_MASK (FIRST_MASK | LAST_MASK)
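
The deleted get/set_settings wrappers become redundant because of_phy_connect() attaches the PHY to ndev->phydev, which the generic phylib helpers dereference themselves. A minimal sketch of the resulting ethtool ops pattern (struct name illustrative):

static const struct ethtool_ops example_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
};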
@@ -403,7 +363,7 @@ static void arc_emac_poll_controller(struct net_device *dev)
static int arc_emac_open(struct net_device *ndev)
{
struct arc_emac_priv *priv = netdev_priv(ndev);
- struct phy_device *phy_dev = priv->phy_dev;
+ struct phy_device *phy_dev = ndev->phydev;
int i;
phy_dev->autoneg = AUTONEG_ENABLE;
@@ -474,7 +434,7 @@ static int arc_emac_open(struct net_device *ndev)
/* Enable EMAC */
arc_reg_or(priv, R_CTRL, EN_MASK);
- phy_start_aneg(priv->phy_dev);
+ phy_start_aneg(ndev->phydev);
netif_start_queue(ndev);
@@ -772,6 +732,7 @@ int arc_emac_probe(struct net_device *ndev, int interface)
struct device *dev = ndev->dev.parent;
struct resource res_regs;
struct device_node *phy_node;
+ struct phy_device *phydev = NULL;
struct arc_emac_priv *priv;
const char *mac_addr;
unsigned int id, clock_frequency, irq;
@@ -788,14 +749,16 @@ int arc_emac_probe(struct net_device *ndev, int interface)
err = of_address_to_resource(dev->of_node, 0, &res_regs);
if (err) {
dev_err(dev, "failed to retrieve registers base from device tree\n");
- return -ENODEV;
+ err = -ENODEV;
+ goto out_put_node;
}
/* Get IRQ from device tree */
irq = irq_of_parse_and_map(dev->of_node, 0);
if (!irq) {
dev_err(dev, "failed to retrieve <irq> value from device tree\n");
- return -ENODEV;
+ err = -ENODEV;
+ goto out_put_node;
}
ndev->netdev_ops = &arc_emac_netdev_ops;
@@ -808,8 +771,10 @@ int arc_emac_probe(struct net_device *ndev, int interface)
priv->dev = dev;
priv->regs = devm_ioremap_resource(dev, &res_regs);
- if (IS_ERR(priv->regs))
- return PTR_ERR(priv->regs);
+ if (IS_ERR(priv->regs)) {
+ err = PTR_ERR(priv->regs);
+ goto out_put_node;
+ }
dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs);
@@ -817,7 +782,7 @@ int arc_emac_probe(struct net_device *ndev, int interface)
err = clk_prepare_enable(priv->clk);
if (err) {
dev_err(dev, "failed to enable clock\n");
- return err;
+ goto out_put_node;
}
clock_frequency = clk_get_rate(priv->clk);
@@ -826,7 +791,8 @@ int arc_emac_probe(struct net_device *ndev, int interface)
if (of_property_read_u32(dev->of_node, "clock-frequency",
&clock_frequency)) {
dev_err(dev, "failed to retrieve <clock-frequency> from device tree\n");
- return -EINVAL;
+ err = -EINVAL;
+ goto out_put_node;
}
}
@@ -887,16 +853,16 @@ int arc_emac_probe(struct net_device *ndev, int interface)
goto out_clken;
}
- priv->phy_dev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0,
- interface);
- if (!priv->phy_dev) {
+ phydev = of_phy_connect(ndev, phy_node, arc_emac_adjust_link, 0,
+ interface);
+ if (!phydev) {
dev_err(dev, "of_phy_connect() failed\n");
err = -ENODEV;
goto out_mdio;
}
dev_info(dev, "connected to %s phy with id 0x%x\n",
- priv->phy_dev->drv->name, priv->phy_dev->phy_id);
+ phydev->drv->name, phydev->phy_id);
netif_napi_add(ndev, &priv->napi, arc_emac_poll, ARC_EMAC_NAPI_WEIGHT);
@@ -906,17 +872,20 @@ int arc_emac_probe(struct net_device *ndev, int interface)
goto out_netif_api;
}
+ of_node_put(phy_node);
return 0;
out_netif_api:
netif_napi_del(&priv->napi);
- phy_disconnect(priv->phy_dev);
- priv->phy_dev = NULL;
+ phy_disconnect(phydev);
out_mdio:
arc_mdio_remove(priv);
out_clken:
if (priv->clk)
clk_disable_unprepare(priv->clk);
+out_put_node:
+ of_node_put(phy_node);
+
return err;
}
EXPORT_SYMBOL_GPL(arc_emac_probe);
@@ -925,8 +894,7 @@ int arc_emac_remove(struct net_device *ndev)
{
struct arc_emac_priv *priv = netdev_priv(ndev);
- phy_disconnect(priv->phy_dev);
- priv->phy_dev = NULL;
+ phy_disconnect(ndev->phydev);
arc_mdio_remove(priv);
unregister_netdev(ndev);
netif_napi_del(&priv->napi);
diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c
index 16419f550eff..058460bdd5a6 100644
--- a/drivers/net/ethernet/arc/emac_mdio.c
+++ b/drivers/net/ethernet/arc/emac_mdio.c
@@ -141,7 +141,7 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
priv->bus = bus;
bus->priv = priv;
bus->parent = priv->dev;
- bus->name = "Synopsys MII Bus",
+ bus->name = "Synopsys MII Bus";
bus->read = &arc_mdio_read;
bus->write = &arc_mdio_write;
bus->reset = &arc_mdio_reset;
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 55b118e876fd..4eb17daefc4f 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -86,9 +86,22 @@ static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
while (!cur_buf->skb && next != rxq->read_idx) {
struct alx_rfd *rfd = &rxq->rfd[cur];
- skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
+ /*
+ * When the DMA RX address is set to something like
+ * 0x....fc0, it is very likely to trigger a DMA
+ * RFD overflow.
+ *
+ * To work around it, allocate the rx skb with 64
+ * bytes of extra space and offset the address
+ * whenever 0x....fc0 is detected.
+ */
+ skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size + 64, gfp);
if (!skb)
break;
+
+ if (((unsigned long)skb->data & 0xfff) == 0xfc0)
+ skb_reserve(skb, 64);
+
dma = dma_map_single(&alx->hw.pdev->dev,
skb->data, alx->rxbuf_size,
DMA_FROM_DEVICE);
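
The workaround above can be read as a small allocation helper: over-allocate by 64 bytes, then shift the buffer start whenever its low 12 bits land on the problematic 0xfc0 offset. A sketch with a hypothetical function name:

static struct sk_buff *example_alloc_rx_skb(struct alx_priv *alx, gfp_t gfp)
{
	/* 64 spare bytes leave room to step past 0x...fc0 */
	struct sk_buff *skb = __netdev_alloc_skb(alx->dev,
						 alx->rxbuf_size + 64, gfp);

	if (skb && ((unsigned long)skb->data & 0xfff) == 0xfc0)
		skb_reserve(skb, 64);
	return skb;
}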
@@ -745,7 +758,7 @@ static netdev_features_t alx_fix_features(struct net_device *netdev,
static void alx_netif_stop(struct alx_priv *alx)
{
- alx->dev->trans_start = jiffies;
+ netif_trans_update(alx->dev);
if (netif_carrier_ok(alx->dev)) {
netif_carrier_off(alx->dev);
netif_tx_disable(alx->dev);
@@ -1238,7 +1251,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct alx_priv *alx;
struct alx_hw *hw;
bool phy_configured;
- int bars, err;
+ int err;
err = pci_enable_device_mem(pdev);
if (err)
@@ -1258,11 +1271,10 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
- bars = pci_select_bars(pdev, IORESOURCE_MEM);
- err = pci_request_selected_regions(pdev, bars, alx_drv_name);
+ err = pci_request_mem_regions(pdev, alx_drv_name);
if (err) {
dev_err(&pdev->dev,
- "pci_request_selected_regions failed(bars:%d)\n", bars);
+ "pci_request_mem_regions failed\n");
goto out_pci_disable;
}
@@ -1388,7 +1400,7 @@ out_unmap:
out_free_netdev:
free_netdev(netdev);
out_pci_release:
- pci_release_selected_regions(pdev, bars);
+ pci_release_mem_regions(pdev);
out_pci_disable:
pci_disable_device(pdev);
return err;
@@ -1407,8 +1419,7 @@ static void alx_remove(struct pci_dev *pdev)
unregister_netdev(alx->dev);
iounmap(hw->hw_addr);
- pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_release_mem_regions(pdev);
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
@@ -1534,6 +1545,8 @@ static const struct pci_device_id alx_pci_tbl[] = {
.driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2400),
.driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
+ { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2500),
+ .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162),
.driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) },
diff --git a/drivers/net/ethernet/atheros/alx/reg.h b/drivers/net/ethernet/atheros/alx/reg.h
index 0959e6824cb6..1fc2d852249f 100644
--- a/drivers/net/ethernet/atheros/alx/reg.h
+++ b/drivers/net/ethernet/atheros/alx/reg.h
@@ -38,6 +38,7 @@
#define ALX_DEV_ID_AR8161 0x1091
#define ALX_DEV_ID_E2200 0xe091
#define ALX_DEV_ID_E2400 0xe0a1
+#define ALX_DEV_ID_E2500 0xe0b1
#define ALX_DEV_ID_AR8162 0x1090
#define ALX_DEV_ID_AR8171 0x10A1
#define ALX_DEV_ID_AR8172 0x10A0
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h
index b9203d928938..c46b489ce9b4 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c.h
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h
@@ -488,7 +488,7 @@ struct atl1c_tpd_ring {
dma_addr_t dma; /* descriptor ring physical address */
u16 size; /* descriptor ring length in bytes */
u16 count; /* number of descriptors in the ring */
- u16 next_to_use; /* this is protectd by adapter->tx_lock */
+ u16 next_to_use;
atomic_t next_to_clean;
struct atl1c_buffer *buffer_info;
};
@@ -542,7 +542,6 @@ struct atl1c_adapter {
u16 link_duplex;
spinlock_t mdio_lock;
- spinlock_t tx_lock;
atomic_t irq_sem;
struct work_struct common_task;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index d0084d4d1a9b..a3200ea6d765 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -821,7 +821,6 @@ static int atl1c_sw_init(struct atl1c_adapter *adapter)
atl1c_set_rxbufsize(adapter, adapter->netdev);
atomic_set(&adapter->irq_sem, 1);
spin_lock_init(&adapter->mdio_lock);
- spin_lock_init(&adapter->tx_lock);
set_bit(__AT_DOWN, &adapter->flags);
return 0;
@@ -2206,7 +2205,6 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
struct net_device *netdev)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
- unsigned long flags;
u16 tpd_req = 1;
struct atl1c_tpd_desc *tpd;
enum atl1c_trans_queue type = atl1c_trans_normal;
@@ -2217,16 +2215,10 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
}
tpd_req = atl1c_cal_tpd_req(skb);
- if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) {
- if (netif_msg_pktdata(adapter))
- dev_info(&adapter->pdev->dev, "tx locked\n");
- return NETDEV_TX_LOCKED;
- }
if (atl1c_tpd_avail(adapter, type) < tpd_req) {
/* not enough descriptors, just stop the queue */
netif_stop_queue(netdev);
- spin_unlock_irqrestore(&adapter->tx_lock, flags);
return NETDEV_TX_BUSY;
}
@@ -2234,7 +2226,6 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
/* do TSO and check sum */
if (atl1c_tso_csum(adapter, skb, &tpd, type) != 0) {
- spin_unlock_irqrestore(&adapter->tx_lock, flags);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -2257,12 +2248,10 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
"tx-skb droppted due to dma error\n");
/* roll back tpd/buffer */
atl1c_tx_rollback(adapter, tpd, type);
- spin_unlock_irqrestore(&adapter->tx_lock, flags);
dev_kfree_skb_any(skb);
} else {
netdev_sent_queue(adapter->netdev, skb->len);
atl1c_tx_queue(adapter, skb, tpd, type);
- spin_unlock_irqrestore(&adapter->tx_lock, flags);
}
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e.h b/drivers/net/ethernet/atheros/atl1e/atl1e.h
index 0212dac7e23a..632bb843aed6 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e.h
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e.h
@@ -442,7 +442,6 @@ struct atl1e_adapter {
u16 link_duplex;
spinlock_t mdio_lock;
- spinlock_t tx_lock;
atomic_t irq_sem;
struct work_struct reset_task;
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 59a03a193e83..974713b19ab6 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -648,7 +648,6 @@ static int atl1e_sw_init(struct atl1e_adapter *adapter)
atomic_set(&adapter->irq_sem, 1);
spin_lock_init(&adapter->mdio_lock);
- spin_lock_init(&adapter->tx_lock);
set_bit(__AT_DOWN, &adapter->flags);
@@ -1866,7 +1865,6 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
struct net_device *netdev)
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
- unsigned long flags;
u16 tpd_req = 1;
struct atl1e_tpd_desc *tpd;
@@ -1880,13 +1878,10 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
return NETDEV_TX_OK;
}
tpd_req = atl1e_cal_tdp_req(skb);
- if (!spin_trylock_irqsave(&adapter->tx_lock, flags))
- return NETDEV_TX_LOCKED;
if (atl1e_tpd_avail(adapter) < tpd_req) {
/* not enough descriptors, just stop the queue */
netif_stop_queue(netdev);
- spin_unlock_irqrestore(&adapter->tx_lock, flags);
return NETDEV_TX_BUSY;
}
@@ -1910,7 +1905,6 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
/* do TSO and check sum */
if (atl1e_tso_csum(adapter, skb, tpd) != 0) {
- spin_unlock_irqrestore(&adapter->tx_lock, flags);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
@@ -1921,10 +1915,7 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
}
atl1e_tx_queue(adapter, tpd_req, tpd);
-
- netdev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
out:
- spin_unlock_irqrestore(&adapter->tx_lock, flags);
return NETDEV_TX_OK;
}
@@ -2285,8 +2276,7 @@ static int atl1e_init_netdev(struct net_device *netdev, struct pci_dev *pdev)
netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO |
NETIF_F_HW_VLAN_CTAG_RX;
- netdev->features = netdev->hw_features | NETIF_F_LLTX |
- NETIF_F_HW_VLAN_CTAG_TX;
+ netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_CTAG_TX;
/* not enabled by default */
netdev->hw_features |= NETIF_F_RXALL | NETIF_F_RXFCS;
return 0;
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index 08a23e6b60e9..b047fd607b83 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -259,6 +259,7 @@ static void nb8800_receive(struct net_device *dev, unsigned int i,
if (err) {
netdev_err(dev, "rx buffer allocation failed\n");
dev->stats.rx_dropped++;
+ dev_kfree_skb(skb);
return;
}
@@ -631,7 +632,7 @@ static void nb8800_mac_config(struct net_device *dev)
static void nb8800_pause_config(struct net_device *dev)
{
struct nb8800_priv *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
+ struct phy_device *phydev = dev->phydev;
u32 rxcr;
if (priv->pause_aneg) {
@@ -664,7 +665,7 @@ static void nb8800_pause_config(struct net_device *dev)
static void nb8800_link_reconfigure(struct net_device *dev)
{
struct nb8800_priv *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
+ struct phy_device *phydev = dev->phydev;
int change = 0;
if (phydev->link) {
@@ -690,7 +691,7 @@ static void nb8800_link_reconfigure(struct net_device *dev)
}
if (change)
- phy_print_status(priv->phydev);
+ phy_print_status(phydev);
}
static void nb8800_update_mac_addr(struct net_device *dev)
@@ -935,9 +936,10 @@ static int nb8800_dma_stop(struct net_device *dev)
static void nb8800_pause_adv(struct net_device *dev)
{
struct nb8800_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = dev->phydev;
u32 adv = 0;
- if (!priv->phydev)
+ if (!phydev)
return;
if (priv->pause_rx)
@@ -945,13 +947,14 @@ static void nb8800_pause_adv(struct net_device *dev)
if (priv->pause_tx)
adv ^= ADVERTISED_Asym_Pause;
- priv->phydev->supported |= adv;
- priv->phydev->advertising |= adv;
+ phydev->supported |= adv;
+ phydev->advertising |= adv;
}
static int nb8800_open(struct net_device *dev)
{
struct nb8800_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev;
int err;
/* clear any pending interrupts */
@@ -969,10 +972,10 @@ static int nb8800_open(struct net_device *dev)
nb8800_mac_rx(dev, true);
nb8800_mac_tx(dev, true);
- priv->phydev = of_phy_connect(dev, priv->phy_node,
- nb8800_link_reconfigure, 0,
- priv->phy_mode);
- if (!priv->phydev)
+ phydev = of_phy_connect(dev, priv->phy_node,
+ nb8800_link_reconfigure, 0,
+ priv->phy_mode);
+ if (!phydev)
goto err_free_irq;
nb8800_pause_adv(dev);
@@ -982,7 +985,7 @@ static int nb8800_open(struct net_device *dev)
netif_start_queue(dev);
nb8800_start_rx(dev);
- phy_start(priv->phydev);
+ phy_start(phydev);
return 0;
@@ -997,8 +1000,9 @@ err_free_dma:
static int nb8800_stop(struct net_device *dev)
{
struct nb8800_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = dev->phydev;
- phy_stop(priv->phydev);
+ phy_stop(phydev);
netif_stop_queue(dev);
napi_disable(&priv->napi);
@@ -1007,8 +1011,7 @@ static int nb8800_stop(struct net_device *dev)
nb8800_mac_rx(dev, false);
nb8800_mac_tx(dev, false);
- phy_disconnect(priv->phydev);
- priv->phydev = NULL;
+ phy_disconnect(phydev);
free_irq(dev->irq, dev);
@@ -1019,9 +1022,7 @@ static int nb8800_stop(struct net_device *dev)
static int nb8800_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct nb8800_priv *priv = netdev_priv(dev);
-
- return phy_mii_ioctl(priv->phydev, rq, cmd);
+ return phy_mii_ioctl(dev->phydev, rq, cmd);
}
static const struct net_device_ops nb8800_netdev_ops = {
@@ -1035,34 +1036,14 @@ static const struct net_device_ops nb8800_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-static int nb8800_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
-
- if (!priv->phydev)
- return -ENODEV;
-
- return phy_ethtool_gset(priv->phydev, cmd);
-}
-
-static int nb8800_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct nb8800_priv *priv = netdev_priv(dev);
-
- if (!priv->phydev)
- return -ENODEV;
-
- return phy_ethtool_sset(priv->phydev, cmd);
-}
-
static int nb8800_nway_reset(struct net_device *dev)
{
- struct nb8800_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = dev->phydev;
- if (!priv->phydev)
+ if (!phydev)
return -ENODEV;
- return genphy_restart_aneg(priv->phydev);
+ return genphy_restart_aneg(phydev);
}
static void nb8800_get_pauseparam(struct net_device *dev,
@@ -1079,6 +1060,7 @@ static int nb8800_set_pauseparam(struct net_device *dev,
struct ethtool_pauseparam *pp)
{
struct nb8800_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = dev->phydev;
priv->pause_aneg = pp->autoneg;
priv->pause_rx = pp->rx_pause;
@@ -1088,8 +1070,8 @@ static int nb8800_set_pauseparam(struct net_device *dev,
if (!priv->pause_aneg)
nb8800_pause_config(dev);
- else if (priv->phydev)
- phy_start_aneg(priv->phydev);
+ else if (phydev)
+ phy_start_aneg(phydev);
return 0;
}
@@ -1182,8 +1164,6 @@ static void nb8800_get_ethtool_stats(struct net_device *dev,
}
static const struct ethtool_ops nb8800_ethtool_ops = {
- .get_settings = nb8800_get_settings,
- .set_settings = nb8800_set_settings,
.nway_reset = nb8800_nway_reset,
.get_link = ethtool_op_get_link,
.get_pauseparam = nb8800_get_pauseparam,
@@ -1191,6 +1171,8 @@ static const struct ethtool_ops nb8800_ethtool_ops = {
.get_sset_count = nb8800_get_sset_count,
.get_strings = nb8800_get_strings,
.get_ethtool_stats = nb8800_get_ethtool_stats,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static int nb8800_hw_init(struct net_device *dev)
@@ -1437,7 +1419,7 @@ static int nb8800_probe(struct platform_device *pdev)
if (ops && ops->reset) {
ret = ops->reset(dev);
if (ret)
- goto err_free_dev;
+ goto err_disable_clk;
}
bus = devm_mdiobus_alloc(&pdev->dev);
@@ -1522,6 +1504,7 @@ static int nb8800_probe(struct platform_device *pdev)
err_free_dma:
nb8800_dma_free(dev);
err_free_bus:
+ of_node_put(priv->phy_node);
mdiobus_unregister(bus);
err_disable_clk:
clk_disable_unprepare(priv->clk);
@@ -1537,6 +1520,7 @@ static int nb8800_remove(struct platform_device *pdev)
struct nb8800_priv *priv = netdev_priv(ndev);
unregister_netdev(ndev);
+ of_node_put(priv->phy_node);
mdiobus_unregister(priv->mii_bus);
diff --git a/drivers/net/ethernet/aurora/nb8800.h b/drivers/net/ethernet/aurora/nb8800.h
index e5adbc2aac9f..6ec4a956e1e5 100644
--- a/drivers/net/ethernet/aurora/nb8800.h
+++ b/drivers/net/ethernet/aurora/nb8800.h
@@ -284,7 +284,6 @@ struct nb8800_priv {
struct mii_bus *mii_bus;
struct device_node *phy_node;
- struct phy_device *phydev;
/* PHY connection type from DT */
int phy_mode;
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 18042c2460bd..bd8c80c0b71c 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -139,31 +139,19 @@ config BNX2X_SRIOV
Virtualization support in the 578xx and 57712 products. This
allows for virtual function acceleration in virtual environments.
-config BNX2X_VXLAN
- bool "Virtual eXtensible Local Area Network support"
- default n
- depends on BNX2X && VXLAN && !(BNX2X=y && VXLAN=m)
- ---help---
- This enables hardward offload support for VXLAN protocol over the
- NetXtremeII series adapters.
- Say Y here if you want to enable hardware offload support for
- Virtual eXtensible Local Area Network (VXLAN) in the driver.
-
-config BNX2X_GENEVE
- bool "Generic Network Virtualization Encapsulation (GENEVE) support"
- depends on BNX2X && GENEVE && !(BNX2X=y && GENEVE=m)
- ---help---
- This allows one to create GENEVE virtual interfaces that provide
- Layer 2 Networks over Layer 3 Networks. GENEVE is often used
- to tunnel virtual network infrastructure in virtualized environments.
- Say Y here if you want to enable hardware offload support for
- Generic Network Virtualization Encapsulation (GENEVE) in the driver.
-
config BGMAC
- tristate "BCMA bus GBit core support"
+ tristate
+ help
+ This enables support for the integrated ethernet controller found
+ on many Broadcom (mostly iProc) SoCs. An appropriate bus interface
+ driver needs to be enabled to select this.
+
+config BGMAC_BCMA
+ tristate "Broadcom iProc GBit BCMA support"
depends on BCMA && BCMA_HOST_SOC
depends on HAS_DMA
depends on BCM47XX || ARCH_BCM_5301X || COMPILE_TEST
+ select BGMAC
select PHYLIB
select FIXED_PHY
---help---
@@ -172,6 +160,19 @@ config BGMAC
When using this driver on the BCM4706, BCMA_DRIVER_GMAC_CMN must
also be enabled to make it work.
+config BGMAC_PLATFORM
+ tristate "Broadcom iProc GBit platform support"
+ depends on HAS_DMA
+ depends on ARCH_BCM_IPROC || COMPILE_TEST
+ depends on OF
+ select BGMAC
+ select PHYLIB
+ select FIXED_PHY
+ default ARCH_BCM_IPROC
+ ---help---
+ Say Y here if you want to use the Broadcom iProc Gigabit Ethernet
+ controller through the generic platform interface
+
config SYSTEMPORT
tristate "Broadcom SYSTEMPORT internal MAC support"
depends on OF
@@ -186,7 +187,6 @@ config SYSTEMPORT
config BNXT
tristate "Broadcom NetXtreme-C/E support"
depends on PCI
- depends on VXLAN || VXLAN=n
select FW_LOADER
select LIBCRC32C
---help---
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index 00584d78b3e0..79f2372c66ec 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -11,5 +11,7 @@ obj-$(CONFIG_BNX2X) += bnx2x/
obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o
obj-$(CONFIG_TIGON3) += tg3.o
obj-$(CONFIG_BGMAC) += bgmac.o
+obj-$(CONFIG_BGMAC_BCMA) += bgmac-bcma.o bgmac-bcma-mdio.o
+obj-$(CONFIG_BGMAC_PLATFORM) += bgmac-platform.o
obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o
obj-$(CONFIG_BNXT) += bnxt/
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 87c6b5bdd616..6c8bc5fadac7 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1859,7 +1859,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
} else {
/* run platform code to initialize PHY device */
- if (pd->mii_config &&
+ if (pd && pd->mii_config &&
pd->mii_config(dev, 1, bcm_enet_mdio_read_mii,
bcm_enet_mdio_write_mii)) {
dev_err(&pdev->dev, "unable to configure mdio bus\n");
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 993c780bdfab..b2d30863caeb 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -96,28 +96,6 @@ static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
}
/* Ethtool operations */
-static int bcm_sysport_set_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
-{
- struct bcm_sysport_priv *priv = netdev_priv(dev);
-
- if (!netif_running(dev))
- return -EINVAL;
-
- return phy_ethtool_sset(priv->phydev, cmd);
-}
-
-static int bcm_sysport_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
-{
- struct bcm_sysport_priv *priv = netdev_priv(dev);
-
- if (!netif_running(dev))
- return -EINVAL;
-
- return phy_ethtool_gset(priv->phydev, cmd);
-}
-
static int bcm_sysport_set_rx_csum(struct net_device *dev,
netdev_features_t wanted)
{
@@ -392,7 +370,7 @@ static void bcm_sysport_get_stats(struct net_device *dev,
else
p = (char *)priv;
p += s->stat_offset;
- data[i] = *(u32 *)p;
+ data[i] = *(unsigned long *)p;
}
}
@@ -831,7 +809,7 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget)
rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
/* re-enable RX interrupts */
intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
}
@@ -873,7 +851,7 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
if (likely(napi_schedule_prep(&priv->napi))) {
/* disable RX interrupts */
intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
- __napi_schedule(&priv->napi);
+ __napi_schedule_irqoff(&priv->napi);
}
}
@@ -916,7 +894,7 @@ static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
if (likely(napi_schedule_prep(&txr->napi))) {
intrl2_1_mask_set(priv, BIT(ring));
- __napi_schedule(&txr->napi);
+ __napi_schedule_irqoff(&txr->napi);
}
}
@@ -1117,7 +1095,7 @@ static void bcm_sysport_tx_timeout(struct net_device *dev)
{
netdev_warn(dev, "transmit timeout!\n");
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
dev->stats.tx_errors++;
netif_tx_wake_all_queues(dev);
@@ -1127,7 +1105,7 @@ static void bcm_sysport_tx_timeout(struct net_device *dev)
static void bcm_sysport_adj_link(struct net_device *dev)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
+ struct phy_device *phydev = dev->phydev;
unsigned int changed = 0;
u32 cmd_bits = 0, reg;
@@ -1182,7 +1160,7 @@ static void bcm_sysport_adj_link(struct net_device *dev)
umac_writel(priv, reg, UMAC_CMD);
}
- phy_print_status(priv->phydev);
+ phy_print_status(phydev);
}
static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
@@ -1525,7 +1503,7 @@ static void bcm_sysport_netif_start(struct net_device *dev)
/* Enable RX interrupt and TX ring full interrupt */
intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
- phy_start(priv->phydev);
+ phy_start(dev->phydev);
/* Enable TX interrupts for the 32 TXQs */
intrl2_1_mask_clear(priv, 0xffffffff);
@@ -1546,6 +1524,7 @@ static void rbuf_init(struct bcm_sysport_priv *priv)
static int bcm_sysport_open(struct net_device *dev)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev;
unsigned int i;
int ret;
@@ -1570,9 +1549,9 @@ static int bcm_sysport_open(struct net_device *dev)
/* Read CRC forward */
priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
- priv->phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
- 0, priv->phy_interface);
- if (!priv->phydev) {
+ phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
+ 0, priv->phy_interface);
+ if (!phydev) {
netdev_err(dev, "could not attach to PHY\n");
return -ENODEV;
}
@@ -1650,7 +1629,7 @@ out_free_tx_ring:
out_free_irq0:
free_irq(priv->irq0, dev);
out_phy_disconnect:
- phy_disconnect(priv->phydev);
+ phy_disconnect(phydev);
return ret;
}
@@ -1661,7 +1640,7 @@ static void bcm_sysport_netif_stop(struct net_device *dev)
/* stop all software from updating hardware */
netif_tx_stop_all_queues(dev);
napi_disable(&priv->napi);
- phy_stop(priv->phydev);
+ phy_stop(dev->phydev);
/* mask all interrupts */
intrl2_0_mask_set(priv, 0xffffffff);
@@ -1708,14 +1687,12 @@ static int bcm_sysport_stop(struct net_device *dev)
free_irq(priv->irq1, dev);
/* Disconnect from PHY */
- phy_disconnect(priv->phydev);
+ phy_disconnect(dev->phydev);
return 0;
}
static struct ethtool_ops bcm_sysport_ethtool_ops = {
- .get_settings = bcm_sysport_get_settings,
- .set_settings = bcm_sysport_set_settings,
.get_drvinfo = bcm_sysport_get_drvinfo,
.get_msglevel = bcm_sysport_get_msglvl,
.set_msglevel = bcm_sysport_set_msglvl,
@@ -1727,6 +1704,8 @@ static struct ethtool_ops bcm_sysport_ethtool_ops = {
.set_wol = bcm_sysport_set_wol,
.get_coalesce = bcm_sysport_get_coalesce,
.set_coalesce = bcm_sysport_set_coalesce,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static const struct net_device_ops bcm_sysport_netdev_ops = {
@@ -1929,7 +1908,7 @@ static int bcm_sysport_suspend(struct device *d)
bcm_sysport_netif_stop(dev);
- phy_suspend(priv->phydev);
+ phy_suspend(dev->phydev);
netif_device_detach(dev);
@@ -2055,7 +2034,7 @@ static int bcm_sysport_resume(struct device *d)
goto out_free_rx_ring;
}
- phy_resume(priv->phydev);
+ phy_resume(dev->phydev);
bcm_sysport_netif_start(dev);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index f28bf545d7f4..1c82e3da69a7 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -670,7 +670,6 @@ struct bcm_sysport_priv {
/* PHY device */
struct device_node *phy_dn;
- struct phy_device *phydev;
phy_interface_t phy_interface;
int old_pause;
int old_link;
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
new file mode 100644
index 000000000000..7c19c8e2bf91
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
@@ -0,0 +1,266 @@
+/*
+ * Driver for (BCM4706)? GBit MAC core on BCMA bus.
+ *
+ * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bcma/bcma.h>
+#include <linux/brcmphy.h>
+#include "bgmac.h"
+
+struct bcma_mdio {
+ struct bcma_device *core;
+ u8 phyaddr;
+};
+
+static bool bcma_mdio_wait_value(struct bcma_device *core, u16 reg, u32 mask,
+ u32 value, int timeout)
+{
+ u32 val;
+ int i;
+
+ for (i = 0; i < timeout / 10; i++) {
+ val = bcma_read32(core, reg);
+ if ((val & mask) == value)
+ return true;
+ udelay(10);
+ }
+ dev_err(&core->dev, "Timeout waiting for reg 0x%X\n", reg);
+ return false;
+}
+
+/**************************************************
+ * PHY ops
+ **************************************************/
+
+static u16 bcma_mdio_phy_read(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg)
+{
+ struct bcma_device *core;
+ u16 phy_access_addr;
+ u16 phy_ctl_addr;
+ u32 tmp;
+
+ BUILD_BUG_ON(BGMAC_PA_DATA_MASK != BCMA_GMAC_CMN_PA_DATA_MASK);
+ BUILD_BUG_ON(BGMAC_PA_ADDR_MASK != BCMA_GMAC_CMN_PA_ADDR_MASK);
+ BUILD_BUG_ON(BGMAC_PA_ADDR_SHIFT != BCMA_GMAC_CMN_PA_ADDR_SHIFT);
+ BUILD_BUG_ON(BGMAC_PA_REG_MASK != BCMA_GMAC_CMN_PA_REG_MASK);
+ BUILD_BUG_ON(BGMAC_PA_REG_SHIFT != BCMA_GMAC_CMN_PA_REG_SHIFT);
+ BUILD_BUG_ON(BGMAC_PA_WRITE != BCMA_GMAC_CMN_PA_WRITE);
+ BUILD_BUG_ON(BGMAC_PA_START != BCMA_GMAC_CMN_PA_START);
+ BUILD_BUG_ON(BGMAC_PC_EPA_MASK != BCMA_GMAC_CMN_PC_EPA_MASK);
+ BUILD_BUG_ON(BGMAC_PC_MCT_MASK != BCMA_GMAC_CMN_PC_MCT_MASK);
+ BUILD_BUG_ON(BGMAC_PC_MCT_SHIFT != BCMA_GMAC_CMN_PC_MCT_SHIFT);
+ BUILD_BUG_ON(BGMAC_PC_MTE != BCMA_GMAC_CMN_PC_MTE);
+
+ if (bcma_mdio->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
+ core = bcma_mdio->core->bus->drv_gmac_cmn.core;
+ phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
+ phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
+ } else {
+ core = bcma_mdio->core;
+ phy_access_addr = BGMAC_PHY_ACCESS;
+ phy_ctl_addr = BGMAC_PHY_CNTL;
+ }
+
+ tmp = bcma_read32(core, phy_ctl_addr);
+ tmp &= ~BGMAC_PC_EPA_MASK;
+ tmp |= phyaddr;
+ bcma_write32(core, phy_ctl_addr, tmp);
+
+ tmp = BGMAC_PA_START;
+ tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
+ tmp |= reg << BGMAC_PA_REG_SHIFT;
+ bcma_write32(core, phy_access_addr, tmp);
+
+ if (!bcma_mdio_wait_value(core, phy_access_addr, BGMAC_PA_START, 0,
+ 1000)) {
+ dev_err(&core->dev, "Reading PHY %d register 0x%X failed\n",
+ phyaddr, reg);
+ return 0xffff;
+ }
+
+ return bcma_read32(core, phy_access_addr) & BGMAC_PA_DATA_MASK;
+}
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphywr */
+static int bcma_mdio_phy_write(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg,
+ u16 value)
+{
+ struct bcma_device *core;
+ u16 phy_access_addr;
+ u16 phy_ctl_addr;
+ u32 tmp;
+
+ if (bcma_mdio->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
+ core = bcma_mdio->core->bus->drv_gmac_cmn.core;
+ phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
+ phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
+ } else {
+ core = bcma_mdio->core;
+ phy_access_addr = BGMAC_PHY_ACCESS;
+ phy_ctl_addr = BGMAC_PHY_CNTL;
+ }
+
+ tmp = bcma_read32(core, phy_ctl_addr);
+ tmp &= ~BGMAC_PC_EPA_MASK;
+ tmp |= phyaddr;
+ bcma_write32(core, phy_ctl_addr, tmp);
+
+ bcma_write32(bcma_mdio->core, BGMAC_INT_STATUS, BGMAC_IS_MDIO);
+ if (bcma_read32(bcma_mdio->core, BGMAC_INT_STATUS) & BGMAC_IS_MDIO)
+ dev_warn(&core->dev, "Error setting MDIO int\n");
+
+ tmp = BGMAC_PA_START;
+ tmp |= BGMAC_PA_WRITE;
+ tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
+ tmp |= reg << BGMAC_PA_REG_SHIFT;
+ tmp |= value;
+ bcma_write32(core, phy_access_addr, tmp);
+
+ if (!bcma_mdio_wait_value(core, phy_access_addr, BGMAC_PA_START, 0,
+ 1000)) {
+ dev_err(&core->dev, "Writing to PHY %d register 0x%X failed\n",
+ phyaddr, reg);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */
+static void bcma_mdio_phy_init(struct bcma_mdio *bcma_mdio)
+{
+ struct bcma_chipinfo *ci = &bcma_mdio->core->bus->chipinfo;
+ u8 i;
+
+ if (ci->id == BCMA_CHIP_ID_BCM5356) {
+ for (i = 0; i < 5; i++) {
+ bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x008b);
+ bcma_mdio_phy_write(bcma_mdio, i, 0x15, 0x0100);
+ bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000f);
+ bcma_mdio_phy_write(bcma_mdio, i, 0x12, 0x2aaa);
+ bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000b);
+ }
+ }
+ if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg != 10) ||
+ (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg != 10) ||
+ (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg != 9)) {
+ struct bcma_drv_cc *cc = &bcma_mdio->core->bus->drv_cc;
+
+ bcma_chipco_chipctl_maskset(cc, 2, ~0xc0000000, 0);
+ bcma_chipco_chipctl_maskset(cc, 4, ~0x80000000, 0);
+ for (i = 0; i < 5; i++) {
+ bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000f);
+ bcma_mdio_phy_write(bcma_mdio, i, 0x16, 0x5284);
+ bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000b);
+ bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x0010);
+ bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000f);
+ bcma_mdio_phy_write(bcma_mdio, i, 0x16, 0x5296);
+ bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x1073);
+ bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x9073);
+ bcma_mdio_phy_write(bcma_mdio, i, 0x16, 0x52b6);
+ bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x9273);
+ bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000b);
+ }
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyreset */
+static int bcma_mdio_phy_reset(struct mii_bus *bus)
+{
+ struct bcma_mdio *bcma_mdio = bus->priv;
+ u8 phyaddr = bcma_mdio->phyaddr;
+
+ if (bcma_mdio->phyaddr == BGMAC_PHY_NOREGS)
+ return 0;
+
+ bcma_mdio_phy_write(bcma_mdio, phyaddr, MII_BMCR, BMCR_RESET);
+ udelay(100);
+ if (bcma_mdio_phy_read(bcma_mdio, phyaddr, MII_BMCR) & BMCR_RESET)
+ dev_err(&bcma_mdio->core->dev, "PHY reset failed\n");
+ bcma_mdio_phy_init(bcma_mdio);
+
+ return 0;
+}
+
+/**************************************************
+ * MII
+ **************************************************/
+
+static int bcma_mdio_mii_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+ return bcma_mdio_phy_read(bus->priv, mii_id, regnum);
+}
+
+static int bcma_mdio_mii_write(struct mii_bus *bus, int mii_id, int regnum,
+ u16 value)
+{
+ return bcma_mdio_phy_write(bus->priv, mii_id, regnum, value);
+}
+
+struct mii_bus *bcma_mdio_mii_register(struct bcma_device *core, u8 phyaddr)
+{
+ struct bcma_mdio *bcma_mdio;
+ struct mii_bus *mii_bus;
+ int err;
+
+ bcma_mdio = kzalloc(sizeof(*bcma_mdio), GFP_KERNEL);
+ if (!bcma_mdio)
+ return ERR_PTR(-ENOMEM);
+
+ mii_bus = mdiobus_alloc();
+ if (!mii_bus) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ mii_bus->name = "bcma_mdio mii bus";
+ sprintf(mii_bus->id, "%s-%d-%d", "bcma_mdio", core->bus->num,
+ core->core_unit);
+ mii_bus->priv = bcma_mdio;
+ mii_bus->read = bcma_mdio_mii_read;
+ mii_bus->write = bcma_mdio_mii_write;
+ mii_bus->reset = bcma_mdio_phy_reset;
+ mii_bus->parent = &core->dev;
+ mii_bus->phy_mask = ~(1 << phyaddr);
+
+ bcma_mdio->core = core;
+ bcma_mdio->phyaddr = phyaddr;
+
+ err = mdiobus_register(mii_bus);
+ if (err) {
+ dev_err(&core->dev, "Registration of mii bus failed\n");
+ goto err_free_bus;
+ }
+
+ return mii_bus;
+
+err_free_bus:
+ mdiobus_free(mii_bus);
+err:
+ kfree(bcma_mdio);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL_GPL(bcma_mdio_mii_register);
+
+void bcma_mdio_mii_unregister(struct mii_bus *mii_bus)
+{
+ struct bcma_mdio *bcma_mdio;
+
+ if (!mii_bus)
+ return;
+
+ bcma_mdio = mii_bus->priv;
+
+ mdiobus_unregister(mii_bus);
+ mdiobus_free(mii_bus);
+ kfree(bcma_mdio);
+}
+EXPORT_SYMBOL_GPL(bcma_mdio_mii_unregister);
+
+MODULE_AUTHOR("Rafał Miłecki");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
new file mode 100644
index 000000000000..625235db644f
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -0,0 +1,315 @@
+/*
+ * Driver for (BCM4706)? GBit MAC core on BCMA bus.
+ *
+ * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bcma/bcma.h>
+#include <linux/brcmphy.h>
+#include <linux/etherdevice.h>
+#include "bgmac.h"
+
+static inline bool bgmac_is_bcm4707_family(struct bcma_device *core)
+{
+ switch (core->bus->chipinfo.id) {
+ case BCMA_CHIP_ID_BCM4707:
+ case BCMA_CHIP_ID_BCM47094:
+ case BCMA_CHIP_ID_BCM53018:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/**************************************************
+ * BCMA bus ops
+ **************************************************/
+
+static u32 bcma_bgmac_read(struct bgmac *bgmac, u16 offset)
+{
+ return bcma_read32(bgmac->bcma.core, offset);
+}
+
+static void bcma_bgmac_write(struct bgmac *bgmac, u16 offset, u32 value)
+{
+ bcma_write32(bgmac->bcma.core, offset, value);
+}
+
+static u32 bcma_bgmac_idm_read(struct bgmac *bgmac, u16 offset)
+{
+ return bcma_aread32(bgmac->bcma.core, offset);
+}
+
+static void bcma_bgmac_idm_write(struct bgmac *bgmac, u16 offset, u32 value)
+{
+ return bcma_awrite32(bgmac->bcma.core, offset, value);
+}
+
+static bool bcma_bgmac_clk_enabled(struct bgmac *bgmac)
+{
+ return bcma_core_is_enabled(bgmac->bcma.core);
+}
+
+static void bcma_bgmac_clk_enable(struct bgmac *bgmac, u32 flags)
+{
+ bcma_core_enable(bgmac->bcma.core, flags);
+}
+
+static void bcma_bgmac_cco_ctl_maskset(struct bgmac *bgmac, u32 offset,
+ u32 mask, u32 set)
+{
+ struct bcma_drv_cc *cc = &bgmac->bcma.core->bus->drv_cc;
+
+ bcma_chipco_chipctl_maskset(cc, offset, mask, set);
+}
+
+static u32 bcma_bgmac_get_bus_clock(struct bgmac *bgmac)
+{
+ struct bcma_drv_cc *cc = &bgmac->bcma.core->bus->drv_cc;
+
+ return bcma_pmu_get_bus_clock(cc);
+}
+
+static void bcma_bgmac_cmn_maskset32(struct bgmac *bgmac, u16 offset, u32 mask,
+ u32 set)
+{
+ bcma_maskset32(bgmac->bcma.cmn, offset, mask, set);
+}
+
+static const struct bcma_device_id bgmac_bcma_tbl[] = {
+ BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT,
+ BCMA_ANY_REV, BCMA_ANY_CLASS),
+ BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_MAC_GBIT, BCMA_ANY_REV,
+ BCMA_ANY_CLASS),
+ {},
+};
+MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl);
+
+/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */
+static int bgmac_probe(struct bcma_device *core)
+{
+ struct ssb_sprom *sprom = &core->bus->sprom;
+ struct mii_bus *mii_bus;
+ struct bgmac *bgmac;
+ u8 *mac;
+ int err;
+
+ bgmac = kzalloc(sizeof(*bgmac), GFP_KERNEL);
+ if (!bgmac)
+ return -ENOMEM;
+
+ bgmac->bcma.core = core;
+ bgmac->dev = &core->dev;
+ bgmac->dma_dev = core->dma_dev;
+ bgmac->irq = core->irq;
+
+ bcma_set_drvdata(core, bgmac);
+
+ switch (core->core_unit) {
+ case 0:
+ mac = sprom->et0mac;
+ break;
+ case 1:
+ mac = sprom->et1mac;
+ break;
+ case 2:
+ mac = sprom->et2mac;
+ break;
+ default:
+ dev_err(bgmac->dev, "Unsupported core_unit %d\n",
+ core->core_unit);
+ err = -ENOTSUPP;
+ goto err;
+ }
+
+ ether_addr_copy(bgmac->mac_addr, mac);
+
+ /* On BCM4706 we need common core to access PHY */
+ if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
+ !core->bus->drv_gmac_cmn.core) {
+ dev_err(bgmac->dev, "GMAC CMN core not found (required for BCM4706)\n");
+ err = -ENODEV;
+ goto err;
+ }
+ bgmac->bcma.cmn = core->bus->drv_gmac_cmn.core;
+
+ switch (core->core_unit) {
+ case 0:
+ bgmac->phyaddr = sprom->et0phyaddr;
+ break;
+ case 1:
+ bgmac->phyaddr = sprom->et1phyaddr;
+ break;
+ case 2:
+ bgmac->phyaddr = sprom->et2phyaddr;
+ break;
+ }
+ bgmac->phyaddr &= BGMAC_PHY_MASK;
+ if (bgmac->phyaddr == BGMAC_PHY_MASK) {
+ dev_err(bgmac->dev, "No PHY found\n");
+ err = -ENODEV;
+ goto err;
+ }
+ dev_info(bgmac->dev, "Found PHY addr: %d%s\n", bgmac->phyaddr,
+ bgmac->phyaddr == BGMAC_PHY_NOREGS ? " (NOREGS)" : "");
+
+ if (!bgmac_is_bcm4707_family(core)) {
+ mii_bus = bcma_mdio_mii_register(core, bgmac->phyaddr);
+ if (IS_ERR(mii_bus)) {
+ err = PTR_ERR(mii_bus);
+ goto err;
+ }
+
+ bgmac->mii_bus = mii_bus;
+ }
+
+ if (core->bus->hosttype == BCMA_HOSTTYPE_PCI) {
+ dev_err(bgmac->dev, "PCI setup not implemented\n");
+ err = -ENOTSUPP;
+ goto err1;
+ }
+
+ bgmac->has_robosw = !!(core->bus->sprom.boardflags_lo &
+ BGMAC_BFL_ENETROBO);
+ if (bgmac->has_robosw)
+ dev_warn(bgmac->dev, "Support for Roboswitch not implemented\n");
+
+ if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
+ dev_warn(bgmac->dev, "Support for ADMtek ethernet switch not implemented\n");
+
+ /* Feature Flags */
+ switch (core->bus->chipinfo.id) {
+ case BCMA_CHIP_ID_BCM5357:
+ bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
+ bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
+ bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1;
+ bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY;
+ if (core->bus->chipinfo.pkg == BCMA_PKG_ID_BCM47186) {
+ bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
+ bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII;
+ }
+ if (core->bus->chipinfo.pkg == BCMA_PKG_ID_BCM5358)
+ bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_EPHYRMII;
+ break;
+ case BCMA_CHIP_ID_BCM53572:
+ bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
+ bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
+ bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1;
+ bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY;
+ if (core->bus->chipinfo.pkg == BCMA_PKG_ID_BCM47188) {
+ bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII;
+ bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
+ }
+ break;
+ case BCMA_CHIP_ID_BCM4749:
+ bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
+ bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
+ bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL1;
+ bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_PHY;
+ if (core->bus->chipinfo.pkg == 10) {
+ bgmac->feature_flags |= BGMAC_FEAT_SW_TYPE_RGMII;
+ bgmac->feature_flags |= BGMAC_FEAT_IOST_ATTACHED;
+ }
+ break;
+ case BCMA_CHIP_ID_BCM4716:
+ bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
+ /* fallthrough */
+ case BCMA_CHIP_ID_BCM47162:
+ bgmac->feature_flags |= BGMAC_FEAT_FLW_CTRL2;
+ bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
+ break;
+ /* bcm4707_family */
+ case BCMA_CHIP_ID_BCM4707:
+ case BCMA_CHIP_ID_BCM47094:
+ case BCMA_CHIP_ID_BCM53018:
+ bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
+ bgmac->feature_flags |= BGMAC_FEAT_NO_RESET;
+ bgmac->feature_flags |= BGMAC_FEAT_FORCE_SPEED_2500;
+ break;
+ default:
+ bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
+ bgmac->feature_flags |= BGMAC_FEAT_SET_RXQ_CLK;
+ }
+
+ if (!bgmac_is_bcm4707_family(core) && core->id.rev > 2)
+ bgmac->feature_flags |= BGMAC_FEAT_MISC_PLL_REQ;
+
+ if (core->id.id == BCMA_CORE_4706_MAC_GBIT) {
+ bgmac->feature_flags |= BGMAC_FEAT_CMN_PHY_CTL;
+ bgmac->feature_flags |= BGMAC_FEAT_NO_CLR_MIB;
+ }
+
+ if (core->id.rev >= 4) {
+ bgmac->feature_flags |= BGMAC_FEAT_CMDCFG_SR_REV4;
+ bgmac->feature_flags |= BGMAC_FEAT_TX_MASK_SETUP;
+ bgmac->feature_flags |= BGMAC_FEAT_RX_MASK_SETUP;
+ }
+
+ bgmac->read = bcma_bgmac_read;
+ bgmac->write = bcma_bgmac_write;
+ bgmac->idm_read = bcma_bgmac_idm_read;
+ bgmac->idm_write = bcma_bgmac_idm_write;
+ bgmac->clk_enabled = bcma_bgmac_clk_enabled;
+ bgmac->clk_enable = bcma_bgmac_clk_enable;
+ bgmac->cco_ctl_maskset = bcma_bgmac_cco_ctl_maskset;
+ bgmac->get_bus_clock = bcma_bgmac_get_bus_clock;
+ bgmac->cmn_maskset32 = bcma_bgmac_cmn_maskset32;
+
+ err = bgmac_enet_probe(bgmac);
+ if (err)
+ goto err1;
+
+ return 0;
+
+err1:
+ bcma_mdio_mii_unregister(bgmac->mii_bus);
+err:
+ kfree(bgmac);
+ bcma_set_drvdata(core, NULL);
+
+ return err;
+}
+
+static void bgmac_remove(struct bcma_device *core)
+{
+ struct bgmac *bgmac = bcma_get_drvdata(core);
+
+ bcma_mdio_mii_unregister(bgmac->mii_bus);
+ bgmac_enet_remove(bgmac);
+ bcma_set_drvdata(core, NULL);
+ kfree(bgmac);
+}
+
+static struct bcma_driver bgmac_bcma_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = bgmac_bcma_tbl,
+ .probe = bgmac_probe,
+ .remove = bgmac_remove,
+};
+
+static int __init bgmac_init(void)
+{
+ int err;
+
+ err = bcma_driver_register(&bgmac_bcma_driver);
+ if (err)
+ return err;
+ pr_info("Broadcom 47xx GBit MAC driver loaded\n");
+
+ return 0;
+}
+
+static void __exit bgmac_exit(void)
+{
+ bcma_driver_unregister(&bgmac_bcma_driver);
+}
+
+module_init(bgmac_init)
+module_exit(bgmac_exit)
+
+MODULE_AUTHOR("Rafał Miłecki");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
new file mode 100644
index 000000000000..be52f270c2c1
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bcma/bcma.h>
+#include <linux/etherdevice.h>
+#include <linux/of_address.h>
+#include <linux/of_net.h>
+#include "bgmac.h"
+
+static u32 platform_bgmac_read(struct bgmac *bgmac, u16 offset)
+{
+ return readl(bgmac->plat.base + offset);
+}
+
+static void platform_bgmac_write(struct bgmac *bgmac, u16 offset, u32 value)
+{
+ writel(value, bgmac->plat.base + offset);
+}
+
+static u32 platform_bgmac_idm_read(struct bgmac *bgmac, u16 offset)
+{
+ return readl(bgmac->plat.idm_base + offset);
+}
+
+static void platform_bgmac_idm_write(struct bgmac *bgmac, u16 offset, u32 value)
+{
+ return writel(value, bgmac->plat.idm_base + offset);
+}
+
+static bool platform_bgmac_clk_enabled(struct bgmac *bgmac)
+{
+ if ((bgmac_idm_read(bgmac, BCMA_IOCTL) &
+ (BCMA_IOCTL_CLK | BCMA_IOCTL_FGC)) != BCMA_IOCTL_CLK)
+ return false;
+ if (bgmac_idm_read(bgmac, BCMA_RESET_CTL) & BCMA_RESET_CTL_RESET)
+ return false;
+ return true;
+}
+
+static void platform_bgmac_clk_enable(struct bgmac *bgmac, u32 flags)
+{
+ bgmac_idm_write(bgmac, BCMA_IOCTL,
+ (BCMA_IOCTL_CLK | BCMA_IOCTL_FGC | flags));
+ bgmac_idm_read(bgmac, BCMA_IOCTL);
+
+ bgmac_idm_write(bgmac, BCMA_RESET_CTL, 0);
+ bgmac_idm_read(bgmac, BCMA_RESET_CTL);
+ udelay(1);
+
+ bgmac_idm_write(bgmac, BCMA_IOCTL, (BCMA_IOCTL_CLK | flags));
+ bgmac_idm_read(bgmac, BCMA_IOCTL);
+ udelay(1);
+}
+
+static void platform_bgmac_cco_ctl_maskset(struct bgmac *bgmac, u32 offset,
+ u32 mask, u32 set)
+{
+ /* This shouldn't be encountered */
+ WARN_ON(1);
+}
+
+static u32 platform_bgmac_get_bus_clock(struct bgmac *bgmac)
+{
+ /* This shouldn't be encountered */
+ WARN_ON(1);
+
+ return 0;
+}
+
+static void platform_bgmac_cmn_maskset32(struct bgmac *bgmac, u16 offset,
+ u32 mask, u32 set)
+{
+ /* This shouldn't be encountered */
+ WARN_ON(1);
+}
+
+static int bgmac_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct bgmac *bgmac;
+ struct resource *regs;
+ const u8 *mac_addr;
+
+ bgmac = devm_kzalloc(&pdev->dev, sizeof(*bgmac), GFP_KERNEL);
+ if (!bgmac)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, bgmac);
+
+ /* Set the features of the 4707 family */
+ bgmac->feature_flags |= BGMAC_FEAT_CLKCTLST;
+ bgmac->feature_flags |= BGMAC_FEAT_NO_RESET;
+ bgmac->feature_flags |= BGMAC_FEAT_FORCE_SPEED_2500;
+ bgmac->feature_flags |= BGMAC_FEAT_CMDCFG_SR_REV4;
+ bgmac->feature_flags |= BGMAC_FEAT_TX_MASK_SETUP;
+ bgmac->feature_flags |= BGMAC_FEAT_RX_MASK_SETUP;
+
+ bgmac->dev = &pdev->dev;
+ bgmac->dma_dev = &pdev->dev;
+
+ mac_addr = of_get_mac_address(np);
+ if (mac_addr)
+ ether_addr_copy(bgmac->mac_addr, mac_addr);
+ else
+ dev_warn(&pdev->dev, "MAC address not present in device tree\n");
+
+ bgmac->irq = platform_get_irq(pdev, 0);
+ if (bgmac->irq < 0) {
+ dev_err(&pdev->dev, "Unable to obtain IRQ\n");
+ return bgmac->irq;
+ }
+
+ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "amac_base");
+ if (!regs) {
+ dev_err(&pdev->dev, "Unable to obtain base resource\n");
+ return -EINVAL;
+ }
+
+ bgmac->plat.base = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(bgmac->plat.base))
+ return PTR_ERR(bgmac->plat.base);
+
+ regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "idm_base");
+ if (!regs) {
+ dev_err(&pdev->dev, "Unable to obtain idm resource\n");
+ return -EINVAL;
+ }
+
+ bgmac->plat.idm_base = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(bgmac->plat.idm_base))
+ return PTR_ERR(bgmac->plat.idm_base);
+
+ bgmac->read = platform_bgmac_read;
+ bgmac->write = platform_bgmac_write;
+ bgmac->idm_read = platform_bgmac_idm_read;
+ bgmac->idm_write = platform_bgmac_idm_write;
+ bgmac->clk_enabled = platform_bgmac_clk_enabled;
+ bgmac->clk_enable = platform_bgmac_clk_enable;
+ bgmac->cco_ctl_maskset = platform_bgmac_cco_ctl_maskset;
+ bgmac->get_bus_clock = platform_bgmac_get_bus_clock;
+ bgmac->cmn_maskset32 = platform_bgmac_cmn_maskset32;
+
+ return bgmac_enet_probe(bgmac);
+}
+
+static int bgmac_remove(struct platform_device *pdev)
+{
+ struct bgmac *bgmac = platform_get_drvdata(pdev);
+
+ bgmac_enet_remove(bgmac);
+
+ return 0;
+}
+
+static const struct of_device_id bgmac_of_enet_match[] = {
+ {.compatible = "brcm,amac",},
+ {.compatible = "brcm,nsp-amac",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, bgmac_of_enet_match);
+
+static struct platform_driver bgmac_enet_driver = {
+ .driver = {
+ .name = "bgmac-enet",
+ .of_match_table = bgmac_of_enet_match,
+ },
+ .probe = bgmac_probe,
+ .remove = bgmac_remove,
+};
+
+module_platform_driver(bgmac_enet_driver);
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 38db2e4d7d54..c4751ece76f6 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -6,51 +6,27 @@
* Licensed under the GNU/GPL. See COPYING for details.
*/
-#include "bgmac.h"
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/delay.h>
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bcma/bcma.h>
#include <linux/etherdevice.h>
-#include <linux/mii.h>
-#include <linux/phy.h>
-#include <linux/phy_fixed.h>
-#include <linux/interrupt.h>
-#include <linux/dma-mapping.h>
#include <linux/bcm47xx_nvram.h>
+#include "bgmac.h"
-static const struct bcma_device_id bgmac_bcma_tbl[] = {
- BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
- BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
- {},
-};
-MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl);
-
-static inline bool bgmac_is_bcm4707_family(struct bgmac *bgmac)
-{
- switch (bgmac->core->bus->chipinfo.id) {
- case BCMA_CHIP_ID_BCM4707:
- case BCMA_CHIP_ID_BCM47094:
- case BCMA_CHIP_ID_BCM53018:
- return true;
- default:
- return false;
- }
-}
-
-static bool bgmac_wait_value(struct bcma_device *core, u16 reg, u32 mask,
+static bool bgmac_wait_value(struct bgmac *bgmac, u16 reg, u32 mask,
u32 value, int timeout)
{
u32 val;
int i;
for (i = 0; i < timeout / 10; i++) {
- val = bcma_read32(core, reg);
+ val = bgmac_read(bgmac, reg);
if ((val & mask) == value)
return true;
udelay(10);
}
- pr_err("Timeout waiting for reg 0x%X\n", reg);
+ dev_err(bgmac->dev, "Timeout waiting for reg 0x%X\n", reg);
return false;
}
@@ -84,22 +60,22 @@ static void bgmac_dma_tx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
udelay(10);
}
if (i)
- bgmac_err(bgmac, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n",
- ring->mmio_base, val);
+ dev_err(bgmac->dev, "Timeout suspending DMA TX ring 0x%X (BGMAC_DMA_TX_STAT: 0x%08X)\n",
+ ring->mmio_base, val);
/* Remove SUSPEND bit */
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, 0);
- if (!bgmac_wait_value(bgmac->core,
+ if (!bgmac_wait_value(bgmac,
ring->mmio_base + BGMAC_DMA_TX_STATUS,
BGMAC_DMA_TX_STAT, BGMAC_DMA_TX_STAT_DISABLED,
10000)) {
- bgmac_warn(bgmac, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n",
- ring->mmio_base);
+ dev_warn(bgmac->dev, "DMA TX ring 0x%X wasn't disabled on time, waiting additional 300us\n",
+ ring->mmio_base);
udelay(300);
val = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
if ((val & BGMAC_DMA_TX_STAT) != BGMAC_DMA_TX_STAT_DISABLED)
- bgmac_err(bgmac, "Reset of DMA TX ring 0x%X failed\n",
- ring->mmio_base);
+ dev_err(bgmac->dev, "Reset of DMA TX ring 0x%X failed\n",
+ ring->mmio_base);
}
}
@@ -109,7 +85,7 @@ static void bgmac_dma_tx_enable(struct bgmac *bgmac,
u32 ctl;
ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL);
- if (bgmac->core->id.rev >= 4) {
+ if (bgmac->feature_flags & BGMAC_FEAT_TX_MASK_SETUP) {
ctl &= ~BGMAC_DMA_TX_BL_MASK;
ctl |= BGMAC_DMA_TX_BL_128 << BGMAC_DMA_TX_BL_SHIFT;
@@ -152,7 +128,7 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
struct bgmac_dma_ring *ring,
struct sk_buff *skb)
{
- struct device *dma_dev = bgmac->core->dma_dev;
+ struct device *dma_dev = bgmac->dma_dev;
struct net_device *net_dev = bgmac->net_dev;
int index = ring->end % BGMAC_TX_RING_SLOTS;
struct bgmac_slot_info *slot = &ring->slots[index];
@@ -161,7 +137,7 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
int i;
if (skb->len > BGMAC_DESC_CTL1_LEN) {
- bgmac_err(bgmac, "Too long skb (%d)\n", skb->len);
+ netdev_err(bgmac->net_dev, "Too long skb (%d)\n", skb->len);
goto err_drop;
}
@@ -174,7 +150,7 @@ static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
* even when ring->end overflows
*/
if (ring->end - ring->start + nr_frags + 1 >= BGMAC_TX_RING_SLOTS) {
- bgmac_err(bgmac, "TX ring is full, queue should be stopped!\n");
+ netdev_err(bgmac->net_dev, "TX ring is full, queue should be stopped!\n");
netif_stop_queue(net_dev);
return NETDEV_TX_BUSY;
}
@@ -231,7 +207,7 @@ err_dma:
dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb),
DMA_TO_DEVICE);
- while (i > 0) {
+ while (i-- > 0) {
int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
struct bgmac_slot_info *slot = &ring->slots[index];
u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);
@@ -241,18 +217,20 @@ err_dma:
}
err_dma_head:
- bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n",
- ring->mmio_base);
+ netdev_err(bgmac->net_dev, "Mapping error of skb on ring 0x%X\n",
+ ring->mmio_base);
err_drop:
dev_kfree_skb(skb);
+ net_dev->stats.tx_dropped++;
+ net_dev->stats.tx_errors++;
return NETDEV_TX_OK;
}
/* Free transmitted packets */
static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
{
- struct device *dma_dev = bgmac->core->dma_dev;
+ struct device *dma_dev = bgmac->dma_dev;
int empty_slot;
bool freed = false;
unsigned bytes_compl = 0, pkts_compl = 0;
@@ -267,15 +245,16 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
while (ring->start != ring->end) {
int slot_idx = ring->start % BGMAC_TX_RING_SLOTS;
struct bgmac_slot_info *slot = &ring->slots[slot_idx];
- u32 ctl1;
+ u32 ctl0, ctl1;
int len;
if (slot_idx == empty_slot)
break;
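+ /* The SOF flag is carried in ctl0; the buffer length lives in ctl1 */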
+ ctl0 = le32_to_cpu(ring->cpu_base[slot_idx].ctl0);
ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1);
len = ctl1 & BGMAC_DESC_CTL1_LEN;
- if (ctl1 & BGMAC_DESC_CTL0_SOF)
+ if (ctl0 & BGMAC_DESC_CTL0_SOF)
/* Unmap no longer used buffer */
dma_unmap_single(dma_dev, slot->dma_addr, len,
DMA_TO_DEVICE);
@@ -284,6 +263,8 @@ static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
DMA_TO_DEVICE);
if (slot->skb) {
+ bgmac->net_dev->stats.tx_bytes += slot->skb->len;
+ bgmac->net_dev->stats.tx_packets++;
bytes_compl += slot->skb->len;
pkts_compl++;
@@ -312,12 +293,12 @@ static void bgmac_dma_rx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
return;
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, 0);
- if (!bgmac_wait_value(bgmac->core,
+ if (!bgmac_wait_value(bgmac,
ring->mmio_base + BGMAC_DMA_RX_STATUS,
BGMAC_DMA_RX_STAT, BGMAC_DMA_RX_STAT_DISABLED,
10000))
- bgmac_err(bgmac, "Reset of ring 0x%X RX failed\n",
- ring->mmio_base);
+ dev_err(bgmac->dev, "Reset of ring 0x%X RX failed\n",
+ ring->mmio_base);
}
static void bgmac_dma_rx_enable(struct bgmac *bgmac,
@@ -326,7 +307,7 @@ static void bgmac_dma_rx_enable(struct bgmac *bgmac,
u32 ctl;
ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL);
- if (bgmac->core->id.rev >= 4) {
+ if (bgmac->feature_flags & BGMAC_FEAT_RX_MASK_SETUP) {
ctl &= ~BGMAC_DMA_RX_BL_MASK;
ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT;
@@ -347,7 +328,7 @@ static void bgmac_dma_rx_enable(struct bgmac *bgmac,
static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
struct bgmac_slot_info *slot)
{
- struct device *dma_dev = bgmac->core->dma_dev;
+ struct device *dma_dev = bgmac->dma_dev;
dma_addr_t dma_addr;
struct bgmac_rx_header *rx;
void *buf;
@@ -366,7 +347,7 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
dma_addr = dma_map_single(dma_dev, buf + BGMAC_RX_BUF_OFFSET,
BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
if (dma_mapping_error(dma_dev, dma_addr)) {
- bgmac_err(bgmac, "DMA mapping error\n");
+ netdev_err(bgmac->net_dev, "DMA mapping error\n");
put_page(virt_to_head_page(buf));
return -ENOMEM;
}
@@ -436,7 +417,7 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
end_slot /= sizeof(struct bgmac_dma_desc);
while (ring->start != end_slot) {
- struct device *dma_dev = bgmac->core->dma_dev;
+ struct device *dma_dev = bgmac->dma_dev;
struct bgmac_slot_info *slot = &ring->slots[ring->start];
struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;
struct sk_buff *skb;
@@ -461,16 +442,19 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
/* Check for poison and drop or pass the packet */
if (len == 0xdead && flags == 0xbeef) {
- bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
- ring->start);
+ netdev_err(bgmac->net_dev, "Found poisoned packet at slot %d, DMA issue!\n",
+ ring->start);
put_page(virt_to_head_page(buf));
+ bgmac->net_dev->stats.rx_errors++;
break;
}
if (len > BGMAC_RX_ALLOC_SIZE) {
- bgmac_err(bgmac, "Found oversized packet at slot %d, DMA issue!\n",
- ring->start);
+ netdev_err(bgmac->net_dev, "Found oversized packet at slot %d, DMA issue!\n",
+ ring->start);
put_page(virt_to_head_page(buf));
+ bgmac->net_dev->stats.rx_length_errors++;
+ bgmac->net_dev->stats.rx_errors++;
break;
}
@@ -479,8 +463,9 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE);
if (unlikely(!skb)) {
- bgmac_err(bgmac, "build_skb failed\n");
+ netdev_err(bgmac->net_dev, "build_skb failed\n");
put_page(virt_to_head_page(buf));
+ bgmac->net_dev->stats.rx_errors++;
break;
}
skb_put(skb, BGMAC_RX_FRAME_OFFSET +
@@ -490,6 +475,8 @@ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
skb_checksum_none_assert(skb);
skb->protocol = eth_type_trans(skb, bgmac->net_dev);
+ bgmac->net_dev->stats.rx_bytes += len;
+ bgmac->net_dev->stats.rx_packets++;
napi_gro_receive(&bgmac->napi, skb);
handled++;
} while (0);
@@ -533,7 +520,7 @@ static bool bgmac_dma_unaligned(struct bgmac *bgmac,
static void bgmac_dma_tx_ring_free(struct bgmac *bgmac,
struct bgmac_dma_ring *ring)
{
- struct device *dma_dev = bgmac->core->dma_dev;
+ struct device *dma_dev = bgmac->dma_dev;
struct bgmac_dma_desc *dma_desc = ring->cpu_base;
struct bgmac_slot_info *slot;
int i;
@@ -559,7 +546,7 @@ static void bgmac_dma_tx_ring_free(struct bgmac *bgmac,
static void bgmac_dma_rx_ring_free(struct bgmac *bgmac,
struct bgmac_dma_ring *ring)
{
- struct device *dma_dev = bgmac->core->dma_dev;
+ struct device *dma_dev = bgmac->dma_dev;
struct bgmac_slot_info *slot;
int i;
@@ -580,7 +567,7 @@ static void bgmac_dma_ring_desc_free(struct bgmac *bgmac,
struct bgmac_dma_ring *ring,
int num_slots)
{
- struct device *dma_dev = bgmac->core->dma_dev;
+ struct device *dma_dev = bgmac->dma_dev;
int size;
if (!ring->cpu_base)
@@ -618,7 +605,7 @@ static void bgmac_dma_free(struct bgmac *bgmac)
static int bgmac_dma_alloc(struct bgmac *bgmac)
{
- struct device *dma_dev = bgmac->core->dma_dev;
+ struct device *dma_dev = bgmac->dma_dev;
struct bgmac_dma_ring *ring;
static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1,
BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, };
@@ -629,8 +616,8 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base));
BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base));
- if (!(bcma_aread32(bgmac->core, BCMA_IOST) & BCMA_IOST_DMA64)) {
- bgmac_err(bgmac, "Core does not report 64-bit DMA\n");
+ if (!(bgmac_idm_read(bgmac, BCMA_IOST) & BCMA_IOST_DMA64)) {
+ dev_err(bgmac->dev, "Core does not report 64-bit DMA\n");
return -ENOTSUPP;
}
@@ -644,8 +631,8 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
&ring->dma_base,
GFP_KERNEL);
if (!ring->cpu_base) {
- bgmac_err(bgmac, "Allocation of TX ring 0x%X failed\n",
- ring->mmio_base);
+ dev_err(bgmac->dev, "Allocation of TX ring 0x%X failed\n",
+ ring->mmio_base);
goto err_dma_free;
}
@@ -669,8 +656,8 @@ static int bgmac_dma_alloc(struct bgmac *bgmac)
&ring->dma_base,
GFP_KERNEL);
if (!ring->cpu_base) {
- bgmac_err(bgmac, "Allocation of RX ring 0x%X failed\n",
- ring->mmio_base);
+ dev_err(bgmac->dev, "Allocation of RX ring 0x%X failed\n",
+ ring->mmio_base);
err = -ENOMEM;
goto err_dma_free;
}
@@ -745,150 +732,6 @@ error:
return err;
}
-/**************************************************
- * PHY ops
- **************************************************/
-
-static u16 bgmac_phy_read(struct bgmac *bgmac, u8 phyaddr, u8 reg)
-{
- struct bcma_device *core;
- u16 phy_access_addr;
- u16 phy_ctl_addr;
- u32 tmp;
-
- BUILD_BUG_ON(BGMAC_PA_DATA_MASK != BCMA_GMAC_CMN_PA_DATA_MASK);
- BUILD_BUG_ON(BGMAC_PA_ADDR_MASK != BCMA_GMAC_CMN_PA_ADDR_MASK);
- BUILD_BUG_ON(BGMAC_PA_ADDR_SHIFT != BCMA_GMAC_CMN_PA_ADDR_SHIFT);
- BUILD_BUG_ON(BGMAC_PA_REG_MASK != BCMA_GMAC_CMN_PA_REG_MASK);
- BUILD_BUG_ON(BGMAC_PA_REG_SHIFT != BCMA_GMAC_CMN_PA_REG_SHIFT);
- BUILD_BUG_ON(BGMAC_PA_WRITE != BCMA_GMAC_CMN_PA_WRITE);
- BUILD_BUG_ON(BGMAC_PA_START != BCMA_GMAC_CMN_PA_START);
- BUILD_BUG_ON(BGMAC_PC_EPA_MASK != BCMA_GMAC_CMN_PC_EPA_MASK);
- BUILD_BUG_ON(BGMAC_PC_MCT_MASK != BCMA_GMAC_CMN_PC_MCT_MASK);
- BUILD_BUG_ON(BGMAC_PC_MCT_SHIFT != BCMA_GMAC_CMN_PC_MCT_SHIFT);
- BUILD_BUG_ON(BGMAC_PC_MTE != BCMA_GMAC_CMN_PC_MTE);
-
- if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
- core = bgmac->core->bus->drv_gmac_cmn.core;
- phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
- phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
- } else {
- core = bgmac->core;
- phy_access_addr = BGMAC_PHY_ACCESS;
- phy_ctl_addr = BGMAC_PHY_CNTL;
- }
-
- tmp = bcma_read32(core, phy_ctl_addr);
- tmp &= ~BGMAC_PC_EPA_MASK;
- tmp |= phyaddr;
- bcma_write32(core, phy_ctl_addr, tmp);
-
- tmp = BGMAC_PA_START;
- tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
- tmp |= reg << BGMAC_PA_REG_SHIFT;
- bcma_write32(core, phy_access_addr, tmp);
-
- if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
- bgmac_err(bgmac, "Reading PHY %d register 0x%X failed\n",
- phyaddr, reg);
- return 0xffff;
- }
-
- return bcma_read32(core, phy_access_addr) & BGMAC_PA_DATA_MASK;
-}
-
-/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphywr */
-static int bgmac_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg, u16 value)
-{
- struct bcma_device *core;
- u16 phy_access_addr;
- u16 phy_ctl_addr;
- u32 tmp;
-
- if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
- core = bgmac->core->bus->drv_gmac_cmn.core;
- phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
- phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
- } else {
- core = bgmac->core;
- phy_access_addr = BGMAC_PHY_ACCESS;
- phy_ctl_addr = BGMAC_PHY_CNTL;
- }
-
- tmp = bcma_read32(core, phy_ctl_addr);
- tmp &= ~BGMAC_PC_EPA_MASK;
- tmp |= phyaddr;
- bcma_write32(core, phy_ctl_addr, tmp);
-
- bgmac_write(bgmac, BGMAC_INT_STATUS, BGMAC_IS_MDIO);
- if (bgmac_read(bgmac, BGMAC_INT_STATUS) & BGMAC_IS_MDIO)
- bgmac_warn(bgmac, "Error setting MDIO int\n");
-
- tmp = BGMAC_PA_START;
- tmp |= BGMAC_PA_WRITE;
- tmp |= phyaddr << BGMAC_PA_ADDR_SHIFT;
- tmp |= reg << BGMAC_PA_REG_SHIFT;
- tmp |= value;
- bcma_write32(core, phy_access_addr, tmp);
-
- if (!bgmac_wait_value(core, phy_access_addr, BGMAC_PA_START, 0, 1000)) {
- bgmac_err(bgmac, "Writing to PHY %d register 0x%X failed\n",
- phyaddr, reg);
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */
-static void bgmac_phy_init(struct bgmac *bgmac)
-{
- struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
- struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
- u8 i;
-
- if (ci->id == BCMA_CHIP_ID_BCM5356) {
- for (i = 0; i < 5; i++) {
- bgmac_phy_write(bgmac, i, 0x1f, 0x008b);
- bgmac_phy_write(bgmac, i, 0x15, 0x0100);
- bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
- bgmac_phy_write(bgmac, i, 0x12, 0x2aaa);
- bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
- }
- }
- if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg != 10) ||
- (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg != 10) ||
- (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg != 9)) {
- bcma_chipco_chipctl_maskset(cc, 2, ~0xc0000000, 0);
- bcma_chipco_chipctl_maskset(cc, 4, ~0x80000000, 0);
- for (i = 0; i < 5; i++) {
- bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
- bgmac_phy_write(bgmac, i, 0x16, 0x5284);
- bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
- bgmac_phy_write(bgmac, i, 0x17, 0x0010);
- bgmac_phy_write(bgmac, i, 0x1f, 0x000f);
- bgmac_phy_write(bgmac, i, 0x16, 0x5296);
- bgmac_phy_write(bgmac, i, 0x17, 0x1073);
- bgmac_phy_write(bgmac, i, 0x17, 0x9073);
- bgmac_phy_write(bgmac, i, 0x16, 0x52b6);
- bgmac_phy_write(bgmac, i, 0x17, 0x9273);
- bgmac_phy_write(bgmac, i, 0x1f, 0x000b);
- }
- }
-}
-
-/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyreset */
-static void bgmac_phy_reset(struct bgmac *bgmac)
-{
- if (bgmac->phyaddr == BGMAC_PHY_NOREGS)
- return;
-
- bgmac_phy_write(bgmac, bgmac->phyaddr, MII_BMCR, BMCR_RESET);
- udelay(100);
- if (bgmac_phy_read(bgmac, bgmac->phyaddr, MII_BMCR) & BMCR_RESET)
- bgmac_err(bgmac, "PHY reset failed\n");
- bgmac_phy_init(bgmac);
-}
/**************************************************
* Chip ops
@@ -902,14 +745,20 @@ static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
{
u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
u32 new_val = (cmdcfg & mask) | set;
+ u32 cmdcfg_sr;
- bgmac_set(bgmac, BGMAC_CMDCFG, BGMAC_CMDCFG_SR(bgmac->core->id.rev));
+ if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
+ cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
+ else
+ cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;
+
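+ /* Hold the MAC in software reset while CMDCFG is being rewritten */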
+ bgmac_set(bgmac, BGMAC_CMDCFG, cmdcfg_sr);
udelay(2);
if (new_val != cmdcfg || force)
bgmac_write(bgmac, BGMAC_CMDCFG, new_val);
- bgmac_mask(bgmac, BGMAC_CMDCFG, ~BGMAC_CMDCFG_SR(bgmac->core->id.rev));
+ bgmac_mask(bgmac, BGMAC_CMDCFG, ~cmdcfg_sr);
udelay(2);
}
@@ -938,7 +787,7 @@ static void bgmac_chip_stats_update(struct bgmac *bgmac)
{
int i;
- if (bgmac->core->id.id != BCMA_CORE_4706_MAC_GBIT) {
+ if (!(bgmac->feature_flags & BGMAC_FEAT_NO_CLR_MIB)) {
for (i = 0; i < BGMAC_NUM_MIB_TX_REGS; i++)
bgmac->mib_tx_regs[i] =
bgmac_read(bgmac,
@@ -957,7 +806,7 @@ static void bgmac_clear_mib(struct bgmac *bgmac)
{
int i;
- if (bgmac->core->id.id == BCMA_CORE_4706_MAC_GBIT)
+ if (bgmac->feature_flags & BGMAC_FEAT_NO_CLR_MIB)
return;
bgmac_set(bgmac, BGMAC_DEV_CTL, BGMAC_DC_MROR);
@@ -987,7 +836,8 @@ static void bgmac_mac_speed(struct bgmac *bgmac)
set |= BGMAC_CMDCFG_ES_2500;
break;
default:
- bgmac_err(bgmac, "Unsupported speed: %d\n", bgmac->mac_speed);
+ dev_err(bgmac->dev, "Unsupported speed: %d\n",
+ bgmac->mac_speed);
}
if (bgmac->mac_duplex == DUPLEX_HALF)
@@ -998,17 +848,16 @@ static void bgmac_mac_speed(struct bgmac *bgmac)
static void bgmac_miiconfig(struct bgmac *bgmac)
{
- struct bcma_device *core = bgmac->core;
- u8 imode;
-
- if (bgmac_is_bcm4707_family(bgmac)) {
- bcma_awrite32(core, BCMA_IOCTL,
- bcma_aread32(core, BCMA_IOCTL) | 0x40 |
- BGMAC_BCMA_IOCTL_SW_CLKEN);
+ if (bgmac->feature_flags & BGMAC_FEAT_FORCE_SPEED_2500) {
+ bgmac_idm_write(bgmac, BCMA_IOCTL,
+ bgmac_idm_read(bgmac, BCMA_IOCTL) | 0x40 |
+ BGMAC_BCMA_IOCTL_SW_CLKEN);
bgmac->mac_speed = SPEED_2500;
bgmac->mac_duplex = DUPLEX_FULL;
bgmac_mac_speed(bgmac);
} else {
+ u8 imode;
+
imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) &
BGMAC_DS_MM_MASK) >> BGMAC_DS_MM_SHIFT;
if (imode == 0 || imode == 1) {
@@ -1022,14 +871,11 @@ static void bgmac_miiconfig(struct bgmac *bgmac)
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */
static void bgmac_chip_reset(struct bgmac *bgmac)
{
- struct bcma_device *core = bgmac->core;
- struct bcma_bus *bus = core->bus;
- struct bcma_chipinfo *ci = &bus->chipinfo;
- u32 flags;
+ u32 cmdcfg_sr;
u32 iost;
int i;
- if (bcma_core_is_enabled(core)) {
+ if (bgmac_clk_enabled(bgmac)) {
if (!bgmac->stats_grabbed) {
/* bgmac_chip_stats_update(bgmac); */
bgmac->stats_grabbed = true;
@@ -1047,38 +893,32 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
/* TODO: Clear software multicast filter list */
}
- iost = bcma_aread32(core, BCMA_IOST);
- if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
- (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
- (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188))
+ iost = bgmac_idm_read(bgmac, BCMA_IOST);
+ if (bgmac->feature_flags & BGMAC_FEAT_IOST_ATTACHED)
iost &= ~BGMAC_BCMA_IOST_ATTACHED;
/* 3GMAC: for BCM4707 & BCM47094, only do core reset at bgmac_probe() */
- if (ci->id != BCMA_CHIP_ID_BCM4707 &&
- ci->id != BCMA_CHIP_ID_BCM47094) {
- flags = 0;
+ if (!(bgmac->feature_flags & BGMAC_FEAT_NO_RESET)) {
+ u32 flags = 0;
if (iost & BGMAC_BCMA_IOST_ATTACHED) {
flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
if (!bgmac->has_robosw)
flags |= BGMAC_BCMA_IOCTL_SW_RESET;
}
- bcma_core_enable(core, flags);
+ bgmac_clk_enable(bgmac, flags);
}
/* Request Misc PLL for corerev > 2 */
- if (core->id.rev > 2 && !bgmac_is_bcm4707_family(bgmac)) {
+ if (bgmac->feature_flags & BGMAC_FEAT_MISC_PLL_REQ) {
bgmac_set(bgmac, BCMA_CLKCTLST,
BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ);
- bgmac_wait_value(bgmac->core, BCMA_CLKCTLST,
+ bgmac_wait_value(bgmac, BCMA_CLKCTLST,
BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
1000);
}
- if (ci->id == BCMA_CHIP_ID_BCM5357 ||
- ci->id == BCMA_CHIP_ID_BCM4749 ||
- ci->id == BCMA_CHIP_ID_BCM53572) {
- struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
+ if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_PHY) {
u8 et_swtype = 0;
u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
BGMAC_CHIPCTL_1_IF_TYPE_MII;
@@ -1086,35 +926,37 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
if (kstrtou8(buf, 0, &et_swtype))
- bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n",
- buf);
+ dev_err(bgmac->dev, "Failed to parse et_swtype (%s)\n",
+ buf);
et_swtype &= 0x0f;
et_swtype <<= 4;
sw_type = et_swtype;
- } else if (ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM5358) {
+ } else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_EPHYRMII) {
sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
- } else if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg == BCMA_PKG_ID_BCM47186) ||
- (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
- (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) {
+ } else if (bgmac->feature_flags & BGMAC_FEAT_SW_TYPE_RGMII) {
sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
}
- bcma_chipco_chipctl_maskset(cc, 1,
- ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK |
- BGMAC_CHIPCTL_1_SW_TYPE_MASK),
- sw_type);
+ bgmac_cco_ctl_maskset(bgmac, 1, ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK |
+ BGMAC_CHIPCTL_1_SW_TYPE_MASK),
+ sw_type);
}
if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
- bcma_awrite32(core, BCMA_IOCTL,
- bcma_aread32(core, BCMA_IOCTL) &
- ~BGMAC_BCMA_IOCTL_SW_RESET);
+ bgmac_idm_write(bgmac, BCMA_IOCTL,
+ bgmac_idm_read(bgmac, BCMA_IOCTL) &
+ ~BGMAC_BCMA_IOCTL_SW_RESET);
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
* The specs say nothing about using BGMAC_CMDCFG_SR, but in this routine
* BGMAC_CMDCFG is read _after_ putting the chip in reset, so the bit has
* to be kept until the MAC is taken out of reset.
*/
+ if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
+ cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
+ else
+ cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;
+
bgmac_cmdcfg_maskset(bgmac,
~(BGMAC_CMDCFG_TE |
BGMAC_CMDCFG_RE |
@@ -1132,19 +974,20 @@ static void bgmac_chip_reset(struct bgmac *bgmac)
BGMAC_CMDCFG_PROM |
BGMAC_CMDCFG_NLC |
BGMAC_CMDCFG_CFE |
- BGMAC_CMDCFG_SR(core->id.rev),
+ cmdcfg_sr,
false);
bgmac->mac_speed = SPEED_UNKNOWN;
bgmac->mac_duplex = DUPLEX_UNKNOWN;
bgmac_clear_mib(bgmac);
- if (core->id.id == BCMA_CORE_4706_MAC_GBIT)
- bcma_maskset32(bgmac->cmn, BCMA_GMAC_CMN_PHY_CTL, ~0,
- BCMA_GMAC_CMN_PC_MTE);
+ if (bgmac->feature_flags & BGMAC_FEAT_CMN_PHY_CTL)
+ bgmac_cmn_maskset32(bgmac, BCMA_GMAC_CMN_PHY_CTL, ~0,
+ BCMA_GMAC_CMN_PC_MTE);
else
bgmac_set(bgmac, BGMAC_PHY_CNTL, BGMAC_PC_MTE);
bgmac_miiconfig(bgmac);
- bgmac_phy_init(bgmac);
+ if (bgmac->mii_bus)
+ bgmac->mii_bus->reset(bgmac->mii_bus);
netdev_reset_queue(bgmac->net_dev);
}
@@ -1163,50 +1006,51 @@ static void bgmac_chip_intrs_off(struct bgmac *bgmac)
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_enable */
static void bgmac_enable(struct bgmac *bgmac)
{
- struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
+ u32 cmdcfg_sr;
u32 cmdcfg;
u32 mode;
- u32 rxq_ctl;
- u32 fl_ctl;
- u16 bp_clk;
- u8 mdp;
+
+ if (bgmac->feature_flags & BGMAC_FEAT_CMDCFG_SR_REV4)
+ cmdcfg_sr = BGMAC_CMDCFG_SR_REV4;
+ else
+ cmdcfg_sr = BGMAC_CMDCFG_SR_REV0;
cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
bgmac_cmdcfg_maskset(bgmac, ~(BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE),
- BGMAC_CMDCFG_SR(bgmac->core->id.rev), true);
+ cmdcfg_sr, true);
udelay(2);
cmdcfg |= BGMAC_CMDCFG_TE | BGMAC_CMDCFG_RE;
bgmac_write(bgmac, BGMAC_CMDCFG, cmdcfg);
mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >>
BGMAC_DS_MM_SHIFT;
- if (ci->id != BCMA_CHIP_ID_BCM47162 || mode != 0)
+ if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST || mode != 0)
bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT);
- if (ci->id == BCMA_CHIP_ID_BCM47162 && mode == 2)
- bcma_chipco_chipctl_maskset(&bgmac->core->bus->drv_cc, 1, ~0,
- BGMAC_CHIPCTL_1_RXC_DLL_BYPASS);
-
- switch (ci->id) {
- case BCMA_CHIP_ID_BCM5357:
- case BCMA_CHIP_ID_BCM4749:
- case BCMA_CHIP_ID_BCM53572:
- case BCMA_CHIP_ID_BCM4716:
- case BCMA_CHIP_ID_BCM47162:
- fl_ctl = 0x03cb04cb;
- if (ci->id == BCMA_CHIP_ID_BCM5357 ||
- ci->id == BCMA_CHIP_ID_BCM4749 ||
- ci->id == BCMA_CHIP_ID_BCM53572)
+ if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST && mode == 2)
+ bgmac_cco_ctl_maskset(bgmac, 1, ~0,
+ BGMAC_CHIPCTL_1_RXC_DLL_BYPASS);
+
+ if (bgmac->feature_flags & (BGMAC_FEAT_FLW_CTRL1 |
+ BGMAC_FEAT_FLW_CTRL2)) {
+ u32 fl_ctl;
+
+ if (bgmac->feature_flags & BGMAC_FEAT_FLW_CTRL1)
fl_ctl = 0x2300e1;
+ else
+ fl_ctl = 0x03cb04cb;
+
bgmac_write(bgmac, BGMAC_FLOW_CTL_THRESH, fl_ctl);
bgmac_write(bgmac, BGMAC_PAUSE_CTL, 0x27fff);
- break;
}
- if (!bgmac_is_bcm4707_family(bgmac)) {
+ if (bgmac->feature_flags & BGMAC_FEAT_SET_RXQ_CLK) {
+ u32 rxq_ctl;
+ u16 bp_clk;
+ u8 mdp;
+
rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL);
rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK;
- bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) /
- 1000000;
+ bp_clk = bgmac_get_bus_clock(bgmac) / 1000000;
mdp = (bp_clk * 128 / 1000) - 3;
rxq_ctl |= (mdp << BGMAC_RXQ_CTL_MDP_SHIFT);
bgmac_write(bgmac, BGMAC_RXQ_CTL, rxq_ctl);
@@ -1250,7 +1094,7 @@ static irqreturn_t bgmac_interrupt(int irq, void *dev_id)
int_status &= ~(BGMAC_IS_TX0 | BGMAC_IS_RX);
if (int_status)
- bgmac_err(bgmac, "Unknown IRQs: 0x%08X\n", int_status);
+ dev_err(bgmac->dev, "Unknown IRQs: 0x%08X\n", int_status);
/* Disable new interrupts until handling existing ones */
bgmac_chip_intrs_off(bgmac);
@@ -1301,18 +1145,19 @@ static int bgmac_open(struct net_device *net_dev)
/* Specs say about reclaiming rings here, but we do that in DMA init */
bgmac_chip_init(bgmac);
- err = request_irq(bgmac->core->irq, bgmac_interrupt, IRQF_SHARED,
+ err = request_irq(bgmac->irq, bgmac_interrupt, IRQF_SHARED,
KBUILD_MODNAME, net_dev);
if (err < 0) {
- bgmac_err(bgmac, "IRQ request error: %d!\n", err);
+ dev_err(bgmac->dev, "IRQ request error: %d!\n", err);
bgmac_dma_cleanup(bgmac);
return err;
}
napi_enable(&bgmac->napi);
- phy_start(bgmac->phy_dev);
+ phy_start(net_dev->phydev);
+
+ netif_start_queue(net_dev);
- netif_carrier_on(net_dev);
return 0;
}
@@ -1322,11 +1167,11 @@ static int bgmac_stop(struct net_device *net_dev)
netif_carrier_off(net_dev);
- phy_stop(bgmac->phy_dev);
+ phy_stop(net_dev->phydev);
napi_disable(&bgmac->napi);
bgmac_chip_intrs_off(bgmac);
- free_irq(bgmac->core->irq, net_dev);
+ free_irq(bgmac->irq, net_dev);
bgmac_chip_reset(bgmac);
bgmac_dma_cleanup(bgmac);
@@ -1360,12 +1205,10 @@ static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
static int bgmac_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd)
{
- struct bgmac *bgmac = netdev_priv(net_dev);
-
if (!netif_running(net_dev))
return -EINVAL;
- return phy_mii_ioctl(bgmac->phy_dev, ifr, cmd);
+ return phy_mii_ioctl(net_dev->phydev, ifr, cmd);
}
static const struct net_device_ops bgmac_netdev_ops = {
@@ -1382,54 +1225,151 @@ static const struct net_device_ops bgmac_netdev_ops = {
* ethtool_ops
**************************************************/
-static int bgmac_get_settings(struct net_device *net_dev,
- struct ethtool_cmd *cmd)
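+/* One hardware MIB counter: register width in bytes, its offset, and the
+ * string reported for it via ethtool.
+ */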
+struct bgmac_stat {
+ u8 size;
+ u32 offset;
+ const char *name;
+};
+
+static struct bgmac_stat bgmac_get_strings_stats[] = {
+ { 8, BGMAC_TX_GOOD_OCTETS, "tx_good_octets" },
+ { 4, BGMAC_TX_GOOD_PKTS, "tx_good" },
+ { 8, BGMAC_TX_OCTETS, "tx_octets" },
+ { 4, BGMAC_TX_PKTS, "tx_pkts" },
+ { 4, BGMAC_TX_BROADCAST_PKTS, "tx_broadcast" },
+ { 4, BGMAC_TX_MULTICAST_PKTS, "tx_multicast" },
+ { 4, BGMAC_TX_LEN_64, "tx_64" },
+ { 4, BGMAC_TX_LEN_65_TO_127, "tx_65_127" },
+ { 4, BGMAC_TX_LEN_128_TO_255, "tx_128_255" },
+ { 4, BGMAC_TX_LEN_256_TO_511, "tx_256_511" },
+ { 4, BGMAC_TX_LEN_512_TO_1023, "tx_512_1023" },
+ { 4, BGMAC_TX_LEN_1024_TO_1522, "tx_1024_1522" },
+ { 4, BGMAC_TX_LEN_1523_TO_2047, "tx_1523_2047" },
+ { 4, BGMAC_TX_LEN_2048_TO_4095, "tx_2048_4095" },
+ { 4, BGMAC_TX_LEN_4096_TO_8191, "tx_4096_8191" },
+ { 4, BGMAC_TX_LEN_8192_TO_MAX, "tx_8192_max" },
+ { 4, BGMAC_TX_JABBER_PKTS, "tx_jabber" },
+ { 4, BGMAC_TX_OVERSIZE_PKTS, "tx_oversize" },
+ { 4, BGMAC_TX_FRAGMENT_PKTS, "tx_fragment" },
+ { 4, BGMAC_TX_UNDERRUNS, "tx_underruns" },
+ { 4, BGMAC_TX_TOTAL_COLS, "tx_total_cols" },
+ { 4, BGMAC_TX_SINGLE_COLS, "tx_single_cols" },
+ { 4, BGMAC_TX_MULTIPLE_COLS, "tx_multiple_cols" },
+ { 4, BGMAC_TX_EXCESSIVE_COLS, "tx_excessive_cols" },
+ { 4, BGMAC_TX_LATE_COLS, "tx_late_cols" },
+ { 4, BGMAC_TX_DEFERED, "tx_defered" },
+ { 4, BGMAC_TX_CARRIER_LOST, "tx_carrier_lost" },
+ { 4, BGMAC_TX_PAUSE_PKTS, "tx_pause" },
+ { 4, BGMAC_TX_UNI_PKTS, "tx_unicast" },
+ { 4, BGMAC_TX_Q0_PKTS, "tx_q0" },
+ { 8, BGMAC_TX_Q0_OCTETS, "tx_q0_octets" },
+ { 4, BGMAC_TX_Q1_PKTS, "tx_q1" },
+ { 8, BGMAC_TX_Q1_OCTETS, "tx_q1_octets" },
+ { 4, BGMAC_TX_Q2_PKTS, "tx_q2" },
+ { 8, BGMAC_TX_Q2_OCTETS, "tx_q2_octets" },
+ { 4, BGMAC_TX_Q3_PKTS, "tx_q3" },
+ { 8, BGMAC_TX_Q3_OCTETS, "tx_q3_octets" },
+ { 8, BGMAC_RX_GOOD_OCTETS, "rx_good_octets" },
+ { 4, BGMAC_RX_GOOD_PKTS, "rx_good" },
+ { 8, BGMAC_RX_OCTETS, "rx_octets" },
+ { 4, BGMAC_RX_PKTS, "rx_pkts" },
+ { 4, BGMAC_RX_BROADCAST_PKTS, "rx_broadcast" },
+ { 4, BGMAC_RX_MULTICAST_PKTS, "rx_multicast" },
+ { 4, BGMAC_RX_LEN_64, "rx_64" },
+ { 4, BGMAC_RX_LEN_65_TO_127, "rx_65_127" },
+ { 4, BGMAC_RX_LEN_128_TO_255, "rx_128_255" },
+ { 4, BGMAC_RX_LEN_256_TO_511, "rx_256_511" },
+ { 4, BGMAC_RX_LEN_512_TO_1023, "rx_512_1023" },
+ { 4, BGMAC_RX_LEN_1024_TO_1522, "rx_1024_1522" },
+ { 4, BGMAC_RX_LEN_1523_TO_2047, "rx_1523_2047" },
+ { 4, BGMAC_RX_LEN_2048_TO_4095, "rx_2048_4095" },
+ { 4, BGMAC_RX_LEN_4096_TO_8191, "rx_4096_8191" },
+ { 4, BGMAC_RX_LEN_8192_TO_MAX, "rx_8192_max" },
+ { 4, BGMAC_RX_JABBER_PKTS, "rx_jabber" },
+ { 4, BGMAC_RX_OVERSIZE_PKTS, "rx_oversize" },
+ { 4, BGMAC_RX_FRAGMENT_PKTS, "rx_fragment" },
+ { 4, BGMAC_RX_MISSED_PKTS, "rx_missed" },
+ { 4, BGMAC_RX_CRC_ALIGN_ERRS, "rx_crc_align" },
+ { 4, BGMAC_RX_UNDERSIZE, "rx_undersize" },
+ { 4, BGMAC_RX_CRC_ERRS, "rx_crc" },
+ { 4, BGMAC_RX_ALIGN_ERRS, "rx_align" },
+ { 4, BGMAC_RX_SYMBOL_ERRS, "rx_symbol" },
+ { 4, BGMAC_RX_PAUSE_PKTS, "rx_pause" },
+ { 4, BGMAC_RX_NONPAUSE_PKTS, "rx_nonpause" },
+ { 4, BGMAC_RX_SACHANGES, "rx_sa_changes" },
+ { 4, BGMAC_RX_UNI_PKTS, "rx_unicast" },
+};
+
+#define BGMAC_STATS_LEN ARRAY_SIZE(bgmac_get_strings_stats)
+
+static int bgmac_get_sset_count(struct net_device *dev, int string_set)
{
- struct bgmac *bgmac = netdev_priv(net_dev);
+ switch (string_set) {
+ case ETH_SS_STATS:
+ return BGMAC_STATS_LEN;
+ }
- return phy_ethtool_gset(bgmac->phy_dev, cmd);
+ return -EOPNOTSUPP;
}
-static int bgmac_set_settings(struct net_device *net_dev,
- struct ethtool_cmd *cmd)
+static void bgmac_get_strings(struct net_device *dev, u32 stringset,
+ u8 *data)
{
- struct bgmac *bgmac = netdev_priv(net_dev);
+ int i;
- return phy_ethtool_sset(bgmac->phy_dev, cmd);
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < BGMAC_STATS_LEN; i++)
+ strlcpy(data + i * ETH_GSTRING_LEN,
+ bgmac_get_strings_stats[i].name, ETH_GSTRING_LEN);
+}
+
+static void bgmac_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *ss, uint64_t *data)
+{
+ struct bgmac *bgmac = netdev_priv(dev);
+ const struct bgmac_stat *s;
+ unsigned int i;
+ u64 val;
+
+ if (!netif_running(dev))
+ return;
+
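+ /* 64-bit MIB counters are split across two registers: the low word
+ * sits at the stat offset, the high word at offset + 4.
+ */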
+ for (i = 0; i < BGMAC_STATS_LEN; i++) {
+ s = &bgmac_get_strings_stats[i];
+ val = 0;
+ if (s->size == 8)
+ val = (u64)bgmac_read(bgmac, s->offset + 4) << 32;
+ val |= bgmac_read(bgmac, s->offset);
+ data[i] = val;
+ }
}
static void bgmac_get_drvinfo(struct net_device *net_dev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->bus_info, "BCMA", sizeof(info->bus_info));
+ strlcpy(info->bus_info, "AXI", sizeof(info->bus_info));
}
static const struct ethtool_ops bgmac_ethtool_ops = {
- .get_settings = bgmac_get_settings,
- .set_settings = bgmac_set_settings,
+ .get_strings = bgmac_get_strings,
+ .get_sset_count = bgmac_get_sset_count,
+ .get_ethtool_stats = bgmac_get_ethtool_stats,
.get_drvinfo = bgmac_get_drvinfo,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
/**************************************************
* MII
**************************************************/
-static int bgmac_mii_read(struct mii_bus *bus, int mii_id, int regnum)
-{
- return bgmac_phy_read(bus->priv, mii_id, regnum);
-}
-
-static int bgmac_mii_write(struct mii_bus *bus, int mii_id, int regnum,
- u16 value)
-{
- return bgmac_phy_write(bus->priv, mii_id, regnum, value);
-}
-
static void bgmac_adjust_link(struct net_device *net_dev)
{
struct bgmac *bgmac = netdev_priv(net_dev);
- struct phy_device *phy_dev = bgmac->phy_dev;
+ struct phy_device *phy_dev = net_dev->phydev;
bool update = false;
if (phy_dev->link) {
@@ -1450,7 +1390,7 @@ static void bgmac_adjust_link(struct net_device *net_dev)
}
}
-static int bgmac_fixed_phy_register(struct bgmac *bgmac)
+static int bgmac_phy_connect_direct(struct bgmac *bgmac)
{
struct fixed_phy_status fphy_status = {
.link = 1,
@@ -1462,196 +1402,76 @@ static int bgmac_fixed_phy_register(struct bgmac *bgmac)
phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, -1, NULL);
if (!phy_dev || IS_ERR(phy_dev)) {
- bgmac_err(bgmac, "Failed to register fixed PHY device\n");
+ dev_err(bgmac->dev, "Failed to register fixed PHY device\n");
return -ENODEV;
}
err = phy_connect_direct(bgmac->net_dev, phy_dev, bgmac_adjust_link,
PHY_INTERFACE_MODE_MII);
if (err) {
- bgmac_err(bgmac, "Connecting PHY failed\n");
+ dev_err(bgmac->dev, "Connecting PHY failed\n");
return err;
}
- bgmac->phy_dev = phy_dev;
-
return err;
}
-static int bgmac_mii_register(struct bgmac *bgmac)
+static int bgmac_phy_connect(struct bgmac *bgmac)
{
- struct mii_bus *mii_bus;
struct phy_device *phy_dev;
char bus_id[MII_BUS_ID_SIZE + 3];
- int err = 0;
-
- if (bgmac_is_bcm4707_family(bgmac))
- return bgmac_fixed_phy_register(bgmac);
-
- mii_bus = mdiobus_alloc();
- if (!mii_bus)
- return -ENOMEM;
-
- mii_bus->name = "bgmac mii bus";
- sprintf(mii_bus->id, "%s-%d-%d", "bgmac", bgmac->core->bus->num,
- bgmac->core->core_unit);
- mii_bus->priv = bgmac;
- mii_bus->read = bgmac_mii_read;
- mii_bus->write = bgmac_mii_write;
- mii_bus->parent = &bgmac->core->dev;
- mii_bus->phy_mask = ~(1 << bgmac->phyaddr);
-
- err = mdiobus_register(mii_bus);
- if (err) {
- bgmac_err(bgmac, "Registration of mii bus failed\n");
- goto err_free_bus;
- }
-
- bgmac->mii_bus = mii_bus;
/* Connect to the PHY */
- snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, mii_bus->id,
+ snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, bgmac->mii_bus->id,
bgmac->phyaddr);
phy_dev = phy_connect(bgmac->net_dev, bus_id, &bgmac_adjust_link,
PHY_INTERFACE_MODE_MII);
if (IS_ERR(phy_dev)) {
- bgmac_err(bgmac, "PHY connecton failed\n");
- err = PTR_ERR(phy_dev);
- goto err_unregister_bus;
+ dev_err(bgmac->dev, "PHY connecton failed\n");
+ return PTR_ERR(phy_dev);
}
- bgmac->phy_dev = phy_dev;
-
- return err;
-err_unregister_bus:
- mdiobus_unregister(mii_bus);
-err_free_bus:
- mdiobus_free(mii_bus);
- return err;
-}
-
-static void bgmac_mii_unregister(struct bgmac *bgmac)
-{
- struct mii_bus *mii_bus = bgmac->mii_bus;
-
- mdiobus_unregister(mii_bus);
- mdiobus_free(mii_bus);
+ return 0;
}
-/**************************************************
- * BCMA bus ops
- **************************************************/
-
-/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */
-static int bgmac_probe(struct bcma_device *core)
+int bgmac_enet_probe(struct bgmac *info)
{
struct net_device *net_dev;
struct bgmac *bgmac;
- struct ssb_sprom *sprom = &core->bus->sprom;
- u8 *mac;
int err;
- switch (core->core_unit) {
- case 0:
- mac = sprom->et0mac;
- break;
- case 1:
- mac = sprom->et1mac;
- break;
- case 2:
- mac = sprom->et2mac;
- break;
- default:
- pr_err("Unsupported core_unit %d\n", core->core_unit);
- return -ENOTSUPP;
- }
-
- if (!is_valid_ether_addr(mac)) {
- dev_err(&core->dev, "Invalid MAC addr: %pM\n", mac);
- eth_random_addr(mac);
- dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
- }
-
- /* This (reset &) enable is not preset in specs or reference driver but
- * Broadcom does it in arch PCI code when enabling fake PCI device.
- */
- bcma_core_enable(core, 0);
-
/* Allocation and references */
net_dev = alloc_etherdev(sizeof(*bgmac));
if (!net_dev)
return -ENOMEM;
+
net_dev->netdev_ops = &bgmac_netdev_ops;
- net_dev->irq = core->irq;
net_dev->ethtool_ops = &bgmac_ethtool_ops;
bgmac = netdev_priv(net_dev);
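+ /* Take a private copy of the bus-provided state so its lifetime
+ * follows the net_device.
+ */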
+ memcpy(bgmac, info, sizeof(*bgmac));
bgmac->net_dev = net_dev;
- bgmac->core = core;
- bcma_set_drvdata(core, bgmac);
-
- /* Defaults */
- memcpy(bgmac->net_dev->dev_addr, mac, ETH_ALEN);
-
- /* On BCM4706 we need common core to access PHY */
- if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
- !core->bus->drv_gmac_cmn.core) {
- bgmac_err(bgmac, "GMAC CMN core not found (required for BCM4706)\n");
- err = -ENODEV;
- goto err_netdev_free;
- }
- bgmac->cmn = core->bus->drv_gmac_cmn.core;
-
- switch (core->core_unit) {
- case 0:
- bgmac->phyaddr = sprom->et0phyaddr;
- break;
- case 1:
- bgmac->phyaddr = sprom->et1phyaddr;
- break;
- case 2:
- bgmac->phyaddr = sprom->et2phyaddr;
- break;
- }
- bgmac->phyaddr &= BGMAC_PHY_MASK;
- if (bgmac->phyaddr == BGMAC_PHY_MASK) {
- bgmac_err(bgmac, "No PHY found\n");
- err = -ENODEV;
- goto err_netdev_free;
+ net_dev->irq = bgmac->irq;
+ SET_NETDEV_DEV(net_dev, bgmac->dev);
+
+ if (!is_valid_ether_addr(bgmac->mac_addr)) {
+ dev_err(bgmac->dev, "Invalid MAC addr: %pM\n",
+ bgmac->mac_addr);
+ eth_random_addr(bgmac->mac_addr);
+ dev_warn(bgmac->dev, "Using random MAC: %pM\n",
+ bgmac->mac_addr);
}
- bgmac_info(bgmac, "Found PHY addr: %d%s\n", bgmac->phyaddr,
- bgmac->phyaddr == BGMAC_PHY_NOREGS ? " (NOREGS)" : "");
+ ether_addr_copy(net_dev->dev_addr, bgmac->mac_addr);
- if (core->bus->hosttype == BCMA_HOSTTYPE_PCI) {
- bgmac_err(bgmac, "PCI setup not implemented\n");
- err = -ENOTSUPP;
- goto err_netdev_free;
- }
+ /* This (reset &) enable is not preset in specs or reference driver but
+ * Broadcom does it in arch PCI code when enabling fake PCI device.
+ */
+ bgmac_clk_enable(bgmac, 0);
bgmac_chip_reset(bgmac);
- /* For Northstar, we have to take all GMAC core out of reset */
- if (bgmac_is_bcm4707_family(bgmac)) {
- struct bcma_device *ns_core;
- int ns_gmac;
-
- /* Northstar has 4 GMAC cores */
- for (ns_gmac = 0; ns_gmac < 4; ns_gmac++) {
- /* As Northstar requirement, we have to reset all GMACs
- * before accessing one. bgmac_chip_reset() call
- * bcma_core_enable() for this core. Then the other
- * three GMACs didn't reset. We do it here.
- */
- ns_core = bcma_find_core_unit(core->bus,
- BCMA_CORE_MAC_GBIT,
- ns_gmac);
- if (ns_core && !bcma_core_is_enabled(ns_core))
- bcma_core_enable(ns_core, 0);
- }
- }
-
err = bgmac_dma_alloc(bgmac);
if (err) {
- bgmac_err(bgmac, "Unable to alloc memory for DMA\n");
+ dev_err(bgmac->dev, "Unable to alloc memory for DMA\n");
goto err_netdev_free;
}
@@ -1659,22 +1479,14 @@ static int bgmac_probe(struct bcma_device *core)
if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0)
bgmac->int_mask &= ~BGMAC_IS_TX_MASK;
- /* TODO: reset the external phy. Specs are needed */
- bgmac_phy_reset(bgmac);
-
- bgmac->has_robosw = !!(core->bus->sprom.boardflags_lo &
- BGMAC_BFL_ENETROBO);
- if (bgmac->has_robosw)
- bgmac_warn(bgmac, "Support for Roboswitch not implemented\n");
-
- if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
- bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n");
-
netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);
- err = bgmac_mii_register(bgmac);
+ if (!bgmac->mii_bus)
+ err = bgmac_phy_connect_direct(bgmac);
+ else
+ err = bgmac_phy_connect(bgmac);
if (err) {
- bgmac_err(bgmac, "Cannot register MDIO\n");
+ dev_err(bgmac->dev, "Cannot connect to phy\n");
goto err_dma_free;
}
@@ -1684,64 +1496,34 @@ static int bgmac_probe(struct bcma_device *core)
err = register_netdev(bgmac->net_dev);
if (err) {
- bgmac_err(bgmac, "Cannot register net device\n");
- goto err_mii_unregister;
+ dev_err(bgmac->dev, "Cannot register net device\n");
+ goto err_phy_disconnect;
}
netif_carrier_off(net_dev);
return 0;
-err_mii_unregister:
- bgmac_mii_unregister(bgmac);
+err_phy_disconnect:
+ phy_disconnect(net_dev->phydev);
err_dma_free:
bgmac_dma_free(bgmac);
-
err_netdev_free:
- bcma_set_drvdata(core, NULL);
free_netdev(net_dev);
return err;
}
+EXPORT_SYMBOL_GPL(bgmac_enet_probe);
-static void bgmac_remove(struct bcma_device *core)
+void bgmac_enet_remove(struct bgmac *bgmac)
{
- struct bgmac *bgmac = bcma_get_drvdata(core);
-
unregister_netdev(bgmac->net_dev);
- bgmac_mii_unregister(bgmac);
+ phy_disconnect(bgmac->net_dev->phydev);
netif_napi_del(&bgmac->napi);
bgmac_dma_free(bgmac);
- bcma_set_drvdata(core, NULL);
free_netdev(bgmac->net_dev);
}
-
-static struct bcma_driver bgmac_bcma_driver = {
- .name = KBUILD_MODNAME,
- .id_table = bgmac_bcma_tbl,
- .probe = bgmac_probe,
- .remove = bgmac_remove,
-};
-
-static int __init bgmac_init(void)
-{
- int err;
-
- err = bcma_driver_register(&bgmac_bcma_driver);
- if (err)
- return err;
- pr_info("Broadcom 47xx GBit MAC driver loaded\n");
-
- return 0;
-}
-
-static void __exit bgmac_exit(void)
-{
- bcma_driver_unregister(&bgmac_bcma_driver);
-}
-
-module_init(bgmac_init)
-module_exit(bgmac_exit)
+EXPORT_SYMBOL_GPL(bgmac_enet_remove);
MODULE_AUTHOR("Rafał Miłecki");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 9a03c142b742..24a250267b88 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -1,19 +1,6 @@
#ifndef _BGMAC_H
#define _BGMAC_H
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#define bgmac_err(bgmac, fmt, ...) \
- dev_err(&(bgmac)->core->dev, fmt, ##__VA_ARGS__)
-#define bgmac_warn(bgmac, fmt, ...) \
- dev_warn(&(bgmac)->core->dev, fmt, ##__VA_ARGS__)
-#define bgmac_info(bgmac, fmt, ...) \
- dev_info(&(bgmac)->core->dev, fmt, ##__VA_ARGS__)
-#define bgmac_dbg(bgmac, fmt, ...) \
- dev_dbg(&(bgmac)->core->dev, fmt, ##__VA_ARGS__)
-
-#include <linux/bcma/bcma.h>
-#include <linux/brcmphy.h>
#include <linux/netdevice.h>
#define BGMAC_DEV_CTL 0x000
@@ -123,7 +110,7 @@
#define BGMAC_TX_LEN_1024_TO_1522 0x334
#define BGMAC_TX_LEN_1523_TO_2047 0x338
#define BGMAC_TX_LEN_2048_TO_4095 0x33c
-#define BGMAC_TX_LEN_4095_TO_8191 0x340
+#define BGMAC_TX_LEN_4096_TO_8191 0x340
#define BGMAC_TX_LEN_8192_TO_MAX 0x344
#define BGMAC_TX_JABBER_PKTS 0x348 /* Error */
#define BGMAC_TX_OVERSIZE_PKTS 0x34c /* Error */
@@ -166,7 +153,7 @@
#define BGMAC_RX_LEN_1024_TO_1522 0x3e4
#define BGMAC_RX_LEN_1523_TO_2047 0x3e8
#define BGMAC_RX_LEN_2048_TO_4095 0x3ec
-#define BGMAC_RX_LEN_4095_TO_8191 0x3f0
+#define BGMAC_RX_LEN_4096_TO_8191 0x3f0
#define BGMAC_RX_LEN_8192_TO_MAX 0x3f4
#define BGMAC_RX_JABBER_PKTS 0x3f8 /* Error */
#define BGMAC_RX_OVERSIZE_PKTS 0x3fc /* Error */
@@ -201,7 +188,6 @@
#define BGMAC_CMDCFG_HD_SHIFT 10
#define BGMAC_CMDCFG_SR_REV0 0x00000800 /* Set to reset mode, for core rev 0-3 */
#define BGMAC_CMDCFG_SR_REV4 0x00002000 /* Set to reset mode, for core rev >= 4 */
-#define BGMAC_CMDCFG_SR(rev) ((rev >= 4) ? BGMAC_CMDCFG_SR_REV4 : BGMAC_CMDCFG_SR_REV0)
#define BGMAC_CMDCFG_ML 0x00008000 /* Set to activate mac loopback mode */
#define BGMAC_CMDCFG_AE 0x00400000
#define BGMAC_CMDCFG_CFE 0x00800000
@@ -387,6 +373,24 @@
#define ETHER_MAX_LEN 1518
+/* Feature Flags */
+#define BGMAC_FEAT_TX_MASK_SETUP BIT(0)
+#define BGMAC_FEAT_RX_MASK_SETUP BIT(1)
+#define BGMAC_FEAT_IOST_ATTACHED BIT(2)
+#define BGMAC_FEAT_NO_RESET BIT(3)
+#define BGMAC_FEAT_MISC_PLL_REQ BIT(4)
+#define BGMAC_FEAT_SW_TYPE_PHY BIT(5)
+#define BGMAC_FEAT_SW_TYPE_EPHYRMII BIT(6)
+#define BGMAC_FEAT_SW_TYPE_RGMII BIT(7)
+#define BGMAC_FEAT_CMN_PHY_CTL BIT(8)
+#define BGMAC_FEAT_FLW_CTRL1 BIT(9)
+#define BGMAC_FEAT_FLW_CTRL2 BIT(10)
+#define BGMAC_FEAT_SET_RXQ_CLK BIT(11)
+#define BGMAC_FEAT_CLKCTLST BIT(12)
+#define BGMAC_FEAT_NO_CLR_MIB BIT(13)
+#define BGMAC_FEAT_FORCE_SPEED_2500 BIT(14)
+#define BGMAC_FEAT_CMDCFG_SR_REV4 BIT(15)
+
struct bgmac_slot_info {
union {
struct sk_buff *skb;
@@ -436,12 +440,26 @@ struct bgmac_rx_header {
};
struct bgmac {
- struct bcma_device *core;
- struct bcma_device *cmn; /* Reference to CMN core for BCM4706 */
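+ /* Bus-specific state: MMIO bases for platform devices, core
+ * pointers for BCMA.
+ */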
+ union {
+ struct {
+ void *base;
+ void *idm_base;
+ } plat;
+ struct {
+ struct bcma_device *core;
+ /* Reference to CMN core for BCM4706 */
+ struct bcma_device *cmn;
+ } bcma;
+ };
+
+ struct device *dev;
+ struct device *dma_dev;
+ unsigned char mac_addr[ETH_ALEN];
+ u32 feature_flags;
+
struct net_device *net_dev;
struct napi_struct napi;
struct mii_bus *mii_bus;
- struct phy_device *phy_dev;
/* DMA */
struct bgmac_dma_ring tx_ring[BGMAC_MAX_TX_RINGS];
@@ -453,6 +471,7 @@ struct bgmac {
u32 mib_rx_regs[BGMAC_NUM_MIB_RX_REGS];
/* Int */
+ int irq;
u32 int_mask;
/* Current MAC state */
@@ -463,16 +482,71 @@ struct bgmac {
bool has_robosw;
bool loopback;
+
+ u32 (*read)(struct bgmac *bgmac, u16 offset);
+ void (*write)(struct bgmac *bgmac, u16 offset, u32 value);
+ u32 (*idm_read)(struct bgmac *bgmac, u16 offset);
+ void (*idm_write)(struct bgmac *bgmac, u16 offset, u32 value);
+ bool (*clk_enabled)(struct bgmac *bgmac);
+ void (*clk_enable)(struct bgmac *bgmac, u32 flags);
+ void (*cco_ctl_maskset)(struct bgmac *bgmac, u32 offset, u32 mask,
+ u32 set);
+ u32 (*get_bus_clock)(struct bgmac *bgmac);
+ void (*cmn_maskset32)(struct bgmac *bgmac, u16 offset, u32 mask,
+ u32 set);
};
+int bgmac_enet_probe(struct bgmac *info);
+void bgmac_enet_remove(struct bgmac *bgmac);
+
+struct mii_bus *bcma_mdio_mii_register(struct bcma_device *core, u8 phyaddr);
+void bcma_mdio_mii_unregister(struct mii_bus *mii_bus);
+
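+/* All register access goes through bus-provided hooks so the core stays
+ * bus-agnostic; the inlines below are thin convenience wrappers.
+ */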
static inline u32 bgmac_read(struct bgmac *bgmac, u16 offset)
{
- return bcma_read32(bgmac->core, offset);
+ return bgmac->read(bgmac, offset);
}
static inline void bgmac_write(struct bgmac *bgmac, u16 offset, u32 value)
{
- bcma_write32(bgmac->core, offset, value);
+ bgmac->write(bgmac, offset, value);
+}
+
+static inline u32 bgmac_idm_read(struct bgmac *bgmac, u16 offset)
+{
+ return bgmac->idm_read(bgmac, offset);
+}
+
+static inline void bgmac_idm_write(struct bgmac *bgmac, u16 offset, u32 value)
+{
+ bgmac->idm_write(bgmac, offset, value);
+}
+
+static inline bool bgmac_clk_enabled(struct bgmac *bgmac)
+{
+ return bgmac->clk_enabled(bgmac);
+}
+
+static inline void bgmac_clk_enable(struct bgmac *bgmac, u32 flags)
+{
+ bgmac->clk_enable(bgmac, flags);
+}
+
+static inline void bgmac_cco_ctl_maskset(struct bgmac *bgmac, u32 offset,
+ u32 mask, u32 set)
+{
+ bgmac->cco_ctl_maskset(bgmac, offset, mask, set);
+}
+
+static inline u32 bgmac_get_bus_clock(struct bgmac *bgmac)
+{
+ return bgmac->get_bus_clock(bgmac);
+}
+
+static inline void bgmac_cmn_maskset32(struct bgmac *bgmac, u16 offset,
+ u32 mask, u32 set)
+{
+ bgmac->cmn_maskset32(bgmac, offset, mask, set);
}
static inline void bgmac_maskset(struct bgmac *bgmac, u16 offset, u32 mask,
@@ -490,5 +564,4 @@ static inline void bgmac_set(struct bgmac *bgmac, u16 offset, u32 set)
{
bgmac_maskset(bgmac, offset, ~0, set);
}
-
#endif /* _BGMAC_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 8fc3f3c137f8..505ceaf451e2 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -6356,10 +6356,6 @@ bnx2_open(struct net_device *dev)
struct bnx2 *bp = netdev_priv(dev);
int rc;
- rc = bnx2_request_firmware(bp);
- if (rc < 0)
- goto out;
-
netif_carrier_off(dev);
bnx2_disable_int(bp);
@@ -6428,7 +6424,6 @@ open_err:
bnx2_free_irq(bp);
bnx2_free_mem(bp);
bnx2_del_napi(bp);
- bnx2_release_firmware(bp);
goto out;
}
@@ -8575,6 +8570,12 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
+ rc = bnx2_request_firmware(bp);
+ if (rc < 0)
+ goto error;
+
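+ /* Put the chip into a known state before finishing initialization */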
+ bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
@@ -8607,6 +8608,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
error:
+ bnx2_release_firmware(bp);
pci_iounmap(pdev, bp->regview);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index d465bd721146..fa3386bb14f7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -59,9 +59,6 @@
#include <linux/semaphore.h>
#include <linux/stringify.h>
#include <linux/vmalloc.h>
-#if IS_ENABLED(CONFIG_BNX2X_GENEVE)
-#include <net/geneve.h>
-#endif
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
@@ -775,6 +772,11 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
(bp->common.bc_ver & 0xff00) >> 8,
(bp->common.bc_ver & 0xff));
+ if (pci_channel_offline(bp->pdev)) {
+ BNX2X_ERR("Cannot dump MCP info while in PCI error\n");
+ return;
+ }
+
val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);
@@ -9418,10 +9420,16 @@ unload_error:
/* Release IRQs */
bnx2x_free_irq(bp);
- /* Reset the chip */
- rc = bnx2x_reset_hw(bp, reset_code);
- if (rc)
- BNX2X_ERR("HW_RESET failed\n");
+ /* Reset the chip, unless the PCI function is offline. If we reach
+ * this point following PCI error handling, the device is really in a
+ * bad state and we're about to remove it, so resetting the chip is
+ * not a good idea.
+ */
+ if (!pci_channel_offline(bp->pdev)) {
+ rc = bnx2x_reset_hw(bp, reset_code);
+ if (rc)
+ BNX2X_ERR("HW_RESET failed\n");
+ }
/* Report UNLOAD_DONE to MCP */
bnx2x_send_unload_done(bp, keep_link);
@@ -10076,7 +10084,6 @@ static void bnx2x_parity_recover(struct bnx2x *bp)
}
}
-#if defined(CONFIG_BNX2X_VXLAN) || IS_ENABLED(CONFIG_BNX2X_GENEVE)
static int bnx2x_udp_port_update(struct bnx2x *bp)
{
struct bnx2x_func_switch_update_params *switch_update_params;
@@ -10177,47 +10184,42 @@ static void __bnx2x_del_udp_port(struct bnx2x *bp, u16 port,
DP(BNX2X_MSG_SP, "Deleted UDP tunnel [%d] port %d\n",
type, port);
}
-#endif
-
-#ifdef CONFIG_BNX2X_VXLAN
-static void bnx2x_add_vxlan_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
-{
- struct bnx2x *bp = netdev_priv(netdev);
- u16 t_port = ntohs(port);
-
- __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
-}
-static void bnx2x_del_vxlan_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
+static void bnx2x_udp_tunnel_add(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
{
struct bnx2x *bp = netdev_priv(netdev);
- u16 t_port = ntohs(port);
+ u16 t_port = ntohs(ti->port);
- __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
-}
-#endif
-
-#if IS_ENABLED(CONFIG_BNX2X_GENEVE)
-static void bnx2x_add_geneve_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
-{
- struct bnx2x *bp = netdev_priv(netdev);
- u16 t_port = ntohs(port);
-
- __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ __bnx2x_add_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
+ break;
+ default:
+ break;
+ }
}
-static void bnx2x_del_geneve_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
+static void bnx2x_udp_tunnel_del(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
{
struct bnx2x *bp = netdev_priv(netdev);
- u16 t_port = ntohs(port);
+ u16 t_port = ntohs(ti->port);
- __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_VXLAN);
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ __bnx2x_del_udp_port(bp, t_port, BNX2X_UDP_PORT_GENEVE);
+ break;
+ default:
+ break;
+ }
}
-#endif
static int bnx2x_close(struct net_device *dev);
@@ -10325,7 +10327,6 @@ sp_rtnl_not_reset:
&bp->sp_rtnl_state))
bnx2x_update_mng_version(bp);
-#if defined(CONFIG_BNX2X_VXLAN) || IS_ENABLED(CONFIG_BNX2X_GENEVE)
if (test_and_clear_bit(BNX2X_SP_RTNL_CHANGE_UDP_PORT,
&bp->sp_rtnl_state)) {
if (bnx2x_udp_port_update(bp)) {
@@ -10335,20 +10336,14 @@ sp_rtnl_not_reset:
BNX2X_UDP_PORT_MAX);
} else {
/* Since we don't store additional port information,
- * if no port is configured for any feature ask for
+ * if no ports are configured for any feature, ask for
* information about currently configured ports.
*/
-#ifdef CONFIG_BNX2X_VXLAN
- if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count)
- vxlan_get_rx_port(bp->dev);
-#endif
-#if IS_ENABLED(CONFIG_BNX2X_GENEVE)
- if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count)
- geneve_get_rx_port(bp->dev);
-#endif
+ if (!bp->udp_tunnel_ports[BNX2X_UDP_PORT_VXLAN].count &&
+ !bp->udp_tunnel_ports[BNX2X_UDP_PORT_GENEVE].count)
+ udp_tunnel_get_rx_info(bp->dev);
}
}
-#endif
/* work which needs rtnl lock not-taken (as it takes the lock itself and
* can be called from other contexts as well)
@@ -12551,14 +12546,8 @@ static int bnx2x_open(struct net_device *dev)
if (rc)
return rc;
-#ifdef CONFIG_BNX2X_VXLAN
- if (IS_PF(bp))
- vxlan_get_rx_port(dev);
-#endif
-#if IS_ENABLED(CONFIG_BNX2X_GENEVE)
if (IS_PF(bp))
- geneve_get_rx_port(dev);
-#endif
+ udp_tunnel_get_rx_info(dev);
return 0;
}
@@ -12895,52 +12884,71 @@ static int __bnx2x_vlan_configure_vid(struct bnx2x *bp, u16 vid, bool add)
return rc;
}
-int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
+static int bnx2x_vlan_configure_vid_list(struct bnx2x *bp)
{
struct bnx2x_vlan_entry *vlan;
int rc = 0;
- if (!bp->vlan_cnt) {
- DP(NETIF_MSG_IFUP, "No need to re-configure vlan filters\n");
- return 0;
- }
-
+ /* Configure all non-configured entries */
list_for_each_entry(vlan, &bp->vlan_reg, link) {
- /* Prepare for cleanup in case of errors */
- if (rc) {
- vlan->hw = false;
- continue;
- }
-
- if (!vlan->hw)
+ if (vlan->hw)
continue;
- DP(NETIF_MSG_IFUP, "Re-configuring vlan 0x%04x\n", vlan->vid);
+ if (bp->vlan_cnt >= bp->vlan_credit)
+ return -ENOBUFS;
rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
if (rc) {
- BNX2X_ERR("Unable to configure VLAN %d\n", vlan->vid);
- vlan->hw = false;
- rc = -EINVAL;
- continue;
+ BNX2X_ERR("Unable to config VLAN %d\n", vlan->vid);
+ return rc;
}
+
+ DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n", vlan->vid);
+ vlan->hw = true;
+ bp->vlan_cnt++;
}
- return rc;
+ return 0;
+}
+
+static void bnx2x_vlan_configure(struct bnx2x *bp, bool set_rx_mode)
+{
+ bool need_accept_any_vlan;
+
+ need_accept_any_vlan = !!bnx2x_vlan_configure_vid_list(bp);
+
+ if (bp->accept_any_vlan != need_accept_any_vlan) {
+ bp->accept_any_vlan = need_accept_any_vlan;
+ DP(NETIF_MSG_IFUP, "Accept all VLAN %s\n",
+ bp->accept_any_vlan ? "raised" : "cleared");
+ if (set_rx_mode) {
+ if (IS_PF(bp))
+ bnx2x_set_rx_mode_inner(bp);
+ else
+ bnx2x_vfpf_storm_rx_mode(bp);
+ }
+ }
+}
+
+int bnx2x_vlan_reconfigure_vid(struct bnx2x *bp)
+{
+ struct bnx2x_vlan_entry *vlan;
+
+ /* The hw forgot all entries after reload */
+ list_for_each_entry(vlan, &bp->vlan_reg, link)
+ vlan->hw = false;
+ bp->vlan_cnt = 0;
+
+ /* Don't set rx mode here. Our caller will do it. */
+ bnx2x_vlan_configure(bp, false);
+
+ return 0;
}
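The credit accounting above is easiest to see in isolation. A minimal userspace sketch, with hypothetical names (an illustration, not driver code): entries are configured in list order until the credit runs out, and the non-zero return is what makes bnx2x_vlan_configure() raise accept_any_vlan.

#include <errno.h>
#include <stdbool.h>

struct vid_entry { int vid; bool hw; };

/* Mirrors bnx2x_vlan_configure_vid_list(): stop at the credit limit
 * instead of half-configuring the list.
 */
static int configure_vids(struct vid_entry *v, int n, int credit)
{
	int cnt = 0, i;

	for (i = 0; i < n; i++) {
		if (v[i].hw)
			continue;
		if (cnt >= credit)
			return -ENOBUFS; /* caller falls back to accept-any-VLAN */
		v[i].hw = true;	/* stands in for the HW filter write */
		cnt++;
	}
	return 0;
}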
static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct bnx2x *bp = netdev_priv(dev);
struct bnx2x_vlan_entry *vlan;
- bool hw = false;
- int rc = 0;
-
- if (!netif_running(bp->dev)) {
- DP(NETIF_MSG_IFUP,
- "Ignoring VLAN configuration the interface is down\n");
- return -EFAULT;
- }
DP(NETIF_MSG_IFUP, "Adding VLAN %d\n", vid);
@@ -12948,93 +12956,47 @@ static int bnx2x_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
if (!vlan)
return -ENOMEM;
- bp->vlan_cnt++;
- if (bp->vlan_cnt > bp->vlan_credit && !bp->accept_any_vlan) {
- DP(NETIF_MSG_IFUP, "Accept all VLAN raised\n");
- bp->accept_any_vlan = true;
- if (IS_PF(bp))
- bnx2x_set_rx_mode_inner(bp);
- else
- bnx2x_vfpf_storm_rx_mode(bp);
- } else if (bp->vlan_cnt <= bp->vlan_credit) {
- rc = __bnx2x_vlan_configure_vid(bp, vid, true);
- hw = true;
- }
-
vlan->vid = vid;
- vlan->hw = hw;
+ vlan->hw = false;
+ list_add_tail(&vlan->link, &bp->vlan_reg);
- if (!rc) {
- list_add(&vlan->link, &bp->vlan_reg);
- } else {
- bp->vlan_cnt--;
- kfree(vlan);
- }
-
- DP(NETIF_MSG_IFUP, "Adding VLAN result %d\n", rc);
+ if (netif_running(dev))
+ bnx2x_vlan_configure(bp, true);
- return rc;
+ return 0;
}
static int bnx2x_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct bnx2x *bp = netdev_priv(dev);
struct bnx2x_vlan_entry *vlan;
+ bool found = false;
int rc = 0;
- if (!netif_running(bp->dev)) {
- DP(NETIF_MSG_IFUP,
- "Ignoring VLAN configuration the interface is down\n");
- return -EFAULT;
- }
-
DP(NETIF_MSG_IFUP, "Removing VLAN %d\n", vid);
- if (!bp->vlan_cnt) {
- BNX2X_ERR("Unable to kill VLAN %d\n", vid);
- return -EINVAL;
- }
-
list_for_each_entry(vlan, &bp->vlan_reg, link)
- if (vlan->vid == vid)
+ if (vlan->vid == vid) {
+ found = true;
break;
+ }
- if (vlan->vid != vid) {
+ if (!found) {
BNX2X_ERR("Unable to kill VLAN %d - not found\n", vid);
return -EINVAL;
}
- if (vlan->hw)
+ if (netif_running(dev) && vlan->hw) {
rc = __bnx2x_vlan_configure_vid(bp, vid, false);
+ DP(NETIF_MSG_IFUP, "HW deconfigured for VLAN %d\n", vid);
+ bp->vlan_cnt--;
+ }
list_del(&vlan->link);
kfree(vlan);
- bp->vlan_cnt--;
-
- if (bp->vlan_cnt <= bp->vlan_credit && bp->accept_any_vlan) {
- /* Configure all non-configured entries */
- list_for_each_entry(vlan, &bp->vlan_reg, link) {
- if (vlan->hw)
- continue;
-
- rc = __bnx2x_vlan_configure_vid(bp, vlan->vid, true);
- if (rc) {
- BNX2X_ERR("Unable to config VLAN %d\n",
- vlan->vid);
- continue;
- }
- DP(NETIF_MSG_IFUP, "HW configured for VLAN %d\n",
- vlan->vid);
- vlan->hw = true;
- }
- DP(NETIF_MSG_IFUP, "Accept all VLAN Removed\n");
- bp->accept_any_vlan = false;
- if (IS_PF(bp))
- bnx2x_set_rx_mode_inner(bp);
- else
- bnx2x_vfpf_storm_rx_mode(bp);
- }
+ if (netif_running(dev))
+ bnx2x_vlan_configure(bp, true);
DP(NETIF_MSG_IFUP, "Removing VLAN result %d\n", rc);
@@ -13072,14 +13034,8 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_get_phys_port_id = bnx2x_get_phys_port_id,
.ndo_set_vf_link_state = bnx2x_set_vf_link_state,
.ndo_features_check = bnx2x_features_check,
-#ifdef CONFIG_BNX2X_VXLAN
- .ndo_add_vxlan_port = bnx2x_add_vxlan_port,
- .ndo_del_vxlan_port = bnx2x_del_vxlan_port,
-#endif
-#if IS_ENABLED(CONFIG_BNX2X_GENEVE)
- .ndo_add_geneve_port = bnx2x_add_geneve_port,
- .ndo_del_geneve_port = bnx2x_del_geneve_port,
-#endif
+ .ndo_udp_tunnel_add = bnx2x_udp_tunnel_add,
+ .ndo_udp_tunnel_del = bnx2x_udp_tunnel_del,
};
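For orientation, a hedged sketch of how the two generic hooks are consumed; the my_* names are hypothetical, not bnx2x code. The stack replays every port it knows through .ndo_udp_tunnel_add, which is why bnx2x_open() above only needs udp_tunnel_get_rx_info().

#include <net/udp_tunnel.h>

static void my_udp_tunnel_add(struct net_device *dev,
			      struct udp_tunnel_info *ti)
{
	u16 port = ntohs(ti->port);	/* ti->port arrives big-endian */

	if (ti->type == UDP_TUNNEL_TYPE_VXLAN ||
	    ti->type == UDP_TUNNEL_TYPE_GENEVE)
		my_program_port(dev, ti->type, port); /* hypothetical helper */
}

static int my_open(struct net_device *dev)
{
	/* Replaces the old vxlan_get_rx_port()/geneve_get_rx_port() pair:
	 * the core calls .ndo_udp_tunnel_add once per known port.
	 */
	udp_tunnel_get_rx_info(dev);
	return 0;
}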
static int bnx2x_set_coherency_mask(struct bnx2x *bp)
@@ -13259,12 +13215,11 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
if (!chip_is_e1x) {
dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
+ NETIF_F_GSO_IPXIP4;
dev->hw_enc_features =
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
- NETIF_F_GSO_IPIP |
- NETIF_F_GSO_SIT |
+ NETIF_F_GSO_IPXIP4 |
NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
}
@@ -13942,14 +13897,14 @@ static int bnx2x_init_one(struct pci_dev *pdev,
bp->doorbells = bnx2x_vf_doorbells(bp);
rc = bnx2x_vf_pci_alloc(bp);
if (rc)
- goto init_one_exit;
+ goto init_one_freemem;
} else {
doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
if (doorbell_size > pci_resource_len(pdev, 2)) {
dev_err(&bp->pdev->dev,
"Cannot map doorbells, bar size too small, aborting\n");
rc = -ENOMEM;
- goto init_one_exit;
+ goto init_one_freemem;
}
bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
doorbell_size);
@@ -13958,19 +13913,19 @@ static int bnx2x_init_one(struct pci_dev *pdev,
dev_err(&bp->pdev->dev,
"Cannot map doorbell space, aborting\n");
rc = -ENOMEM;
- goto init_one_exit;
+ goto init_one_freemem;
}
if (IS_VF(bp)) {
rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
if (rc)
- goto init_one_exit;
+ goto init_one_freemem;
}
/* Enable SRIOV if capability found in configuration space */
rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
if (rc)
- goto init_one_exit;
+ goto init_one_freemem;
/* calc qm_cid_count */
bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
@@ -13989,7 +13944,7 @@ static int bnx2x_init_one(struct pci_dev *pdev,
rc = bnx2x_set_int_mode(bp);
if (rc) {
dev_err(&pdev->dev, "Cannot set interrupts\n");
- goto init_one_exit;
+ goto init_one_freemem;
}
BNX2X_DEV_INFO("set interrupts successfully\n");
@@ -13997,7 +13952,7 @@ static int bnx2x_init_one(struct pci_dev *pdev,
rc = register_netdev(dev);
if (rc) {
dev_err(&pdev->dev, "Cannot register net device\n");
- goto init_one_exit;
+ goto init_one_freemem;
}
BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);
@@ -14030,6 +13985,9 @@ static int bnx2x_init_one(struct pci_dev *pdev,
return 0;
+init_one_freemem:
+ bnx2x_free_mem_bp(bp);
+
init_one_exit:
bnx2x_disable_pcie_error_reporting(bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index c39a7f5c6a01..228c964e709a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1,6 +1,6 @@
/* Broadcom NetXtreme-C/E network driver.
*
- * Copyright (c) 2014-2015 Broadcom Corporation
+ * Copyright (c) 2014-2016 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -37,9 +37,7 @@
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
-#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE)
-#include <net/vxlan.h>
-#endif
+#include <net/udp_tunnel.h>
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif
@@ -75,11 +73,32 @@ enum board_idx {
BCM57301,
BCM57302,
BCM57304,
+ BCM57417_NPAR,
+ BCM58700,
+ BCM57311,
+ BCM57312,
BCM57402,
BCM57404,
BCM57406,
+ BCM57402_NPAR,
+ BCM57407,
+ BCM57412,
+ BCM57414,
+ BCM57416,
+ BCM57417,
+ BCM57412_NPAR,
+ BCM57314,
+ BCM57417_SFP,
+ BCM57416_SFP,
+ BCM57404_NPAR,
+ BCM57406_NPAR,
+ BCM57407_SFP,
+ BCM57414_NPAR,
+ BCM57416_NPAR,
BCM57304_VF,
BCM57404_VF,
+ BCM57414_VF,
+ BCM57314_VF,
};
/* indexed by enum above */
@@ -89,23 +108,65 @@ static const struct {
{ "Broadcom BCM57301 NetXtreme-C Single-port 10Gb Ethernet" },
{ "Broadcom BCM57302 NetXtreme-C Dual-port 10Gb/25Gb Ethernet" },
{ "Broadcom BCM57304 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
+ { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
+ { "Broadcom BCM58700 Nitro 4-port 1Gb/2.5Gb/10Gb Ethernet" },
+ { "Broadcom BCM57311 NetXtreme-C Single-port 10Gb Ethernet" },
+ { "Broadcom BCM57312 NetXtreme-C Dual-port 10Gb/25Gb Ethernet" },
{ "Broadcom BCM57402 NetXtreme-E Dual-port 10Gb Ethernet" },
{ "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
{ "Broadcom BCM57406 NetXtreme-E Dual-port 10GBase-T Ethernet" },
+ { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
+ { "Broadcom BCM57407 NetXtreme-E Dual-port 10GBase-T Ethernet" },
+ { "Broadcom BCM57412 NetXtreme-E Dual-port 10Gb Ethernet" },
+ { "Broadcom BCM57414 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
+ { "Broadcom BCM57416 NetXtreme-E Dual-port 10GBase-T Ethernet" },
+ { "Broadcom BCM57417 NetXtreme-E Dual-port 10GBase-T Ethernet" },
+ { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
+ { "Broadcom BCM57314 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
+ { "Broadcom BCM57417 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
+ { "Broadcom BCM57416 NetXtreme-E Dual-port 10Gb Ethernet" },
+ { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
+ { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
+ { "Broadcom BCM57407 NetXtreme-E Dual-port 25Gb Ethernet" },
+ { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
+ { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
{ "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" },
{ "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" },
+ { "Broadcom BCM57414 NetXtreme-E Ethernet Virtual Function" },
+ { "Broadcom BCM57314 NetXtreme-E Ethernet Virtual Function" },
};
static const struct pci_device_id bnxt_pci_tbl[] = {
{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
+ { PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
+ { PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
+ { PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
+ { PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
+ { PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
+ { PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
+ { PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
+ { PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
+ { PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
+ { PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
+ { PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
+ { PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
+ { PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
+ { PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
#ifdef CONFIG_BNXT_SRIOV
{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = BCM57304_VF },
{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = BCM57404_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = BCM57414_VF },
+ { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = BCM57314_VF },
#endif
{ 0 }
};
@@ -118,9 +179,18 @@ static const u16 bnxt_vf_req_snif[] = {
HWRM_CFA_L2_FILTER_ALLOC,
};
+static const u16 bnxt_async_events_arr[] = {
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
+ HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
+};
+
static bool bnxt_vf_pciid(enum board_idx idx)
{
- return (idx == BCM57304_VF || idx == BCM57404_VF);
+ return (idx == BCM57304_VF || idx == BCM57404_VF ||
+ idx == BCM57314_VF || idx == BCM57414_VF);
}
#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID)
@@ -276,19 +346,20 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
txr->tx_prod = prod;
+ tx_buf->is_push = 1;
netdev_tx_sent_queue(txq, skb->len);
+ wmb(); /* Sync is_push and byte queue before pushing data */
push_len = (length + sizeof(*tx_push) + 7) / 8;
if (push_len > 16) {
__iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
- __iowrite64_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
- push_len - 16);
+ __iowrite32_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
+ (push_len - 16) << 1);
} else {
__iowrite64_copy(txr->tx_doorbell, tx_push_buf,
push_len);
}
- tx_buf->is_push = 1;
goto tx_done;
}
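A quick unit check on the tail copy (numbers illustrative): push_len is counted in 64-bit words, while __iowrite32_copy() takes a 32-bit-word count, hence the << 1.

/* Example: push_len == 20 u64 words.
 *   head: __iowrite64_copy(db, buf, 16)          -> 16 u64 = 128 bytes
 *   tail: 20 - 16 = 4 u64 words = (4 << 1) = 8 u32 words
 *         __iowrite32_copy(db + 4, buf + 1, 8)   -> remaining 32 bytes
 */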
@@ -909,6 +980,7 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
}
tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
+ tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
rxr->rx_prod = NEXT_RX(prod);
cons = NEXT_RX(cons);
@@ -927,32 +999,102 @@ static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
}
+static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
+ int payload_off, int tcp_ts,
+ struct sk_buff *skb)
+{
+#ifdef CONFIG_INET
+ struct tcphdr *th;
+ int len, nw_off;
+ u16 outer_ip_off, inner_ip_off, inner_mac_off;
+ u32 hdr_info = tpa_info->hdr_info;
+ bool loopback = false;
+
+ inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
+ inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
+ outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
+
+ /* If the packet is an internal loopback packet, the offsets will
+ * have an extra 4 bytes.
+ */
+ if (inner_mac_off == 4) {
+ loopback = true;
+ } else if (inner_mac_off > 4) {
+ __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
+ ETH_HLEN - 2));
+
+ /* We only support inner IPv4/IPv6. If we don't see the
+ * correct protocol ID, it must be a loopback packet where
+ * the offsets are off by 4.
+ */
+ if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
+ loopback = true;
+ }
+ if (loopback) {
+ /* internal loopback packet, subtract all offsets by 4 */
+ inner_ip_off -= 4;
+ inner_mac_off -= 4;
+ outer_ip_off -= 4;
+ }
+
+ nw_off = inner_ip_off - ETH_HLEN;
+ skb_set_network_header(skb, nw_off);
+ if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
+ struct ipv6hdr *iph = ipv6_hdr(skb);
+
+ skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
+ len = skb->len - skb_transport_offset(skb);
+ th = tcp_hdr(skb);
+ th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
+ } else {
+ struct iphdr *iph = ip_hdr(skb);
+
+ skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
+ len = skb->len - skb_transport_offset(skb);
+ th = tcp_hdr(skb);
+ th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
+ }
+
+ if (inner_mac_off) { /* tunnel */
+ struct udphdr *uh = NULL;
+ __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
+ ETH_HLEN - 2));
+
+ if (proto == htons(ETH_P_IP)) {
+ struct iphdr *iph = (struct iphdr *)skb->data;
+
+ if (iph->protocol == IPPROTO_UDP)
+ uh = (struct udphdr *)(iph + 1);
+ } else {
+ struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
+
+ if (iph->nexthdr == IPPROTO_UDP)
+ uh = (struct udphdr *)(iph + 1);
+ }
+ if (uh) {
+ if (uh->check)
+ skb_shinfo(skb)->gso_type |=
+ SKB_GSO_UDP_TUNNEL_CSUM;
+ else
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
+ }
+ }
+#endif
+ return skb;
+}
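One subtlety worth spelling out (our reading of the code, not stated in the patch): the hdr_info offsets appear to be measured from the start of the outer MAC header, while skb->data here begins after it, hence the recurring ETH_HLEN correction; an ethertype is the 2 bytes immediately preceding its L3 header.

/* Worked layout for IPv4-in-VXLAN (illustrative byte counts):
 *   outer MAC 14 | outer IP 20 | UDP 8 | VXLAN 8 | inner MAC 14 | inner IP
 * gives inner_mac_off = 50 and inner_ip_off = 64 from the outer MAC.
 * With skb->data starting after the outer MAC:
 *   inner ethertype: skb->data + 64 - ETH_HLEN - 2 == skb->data + 48
 *   network header:  64 - ETH_HLEN == 50
 */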
+
#define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
-static inline struct sk_buff *bnxt_gro_skb(struct bnxt_tpa_info *tpa_info,
- struct rx_tpa_end_cmp *tpa_end,
- struct rx_tpa_end_cmp_ext *tpa_end1,
+static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
+ int payload_off, int tcp_ts,
struct sk_buff *skb)
{
#ifdef CONFIG_INET
struct tcphdr *th;
- int payload_off, tcp_opt_len = 0;
- int len, nw_off;
- u16 segs;
+ int len, nw_off, tcp_opt_len;
- segs = TPA_END_TPA_SEGS(tpa_end);
- if (segs == 1)
- return skb;
-
- NAPI_GRO_CB(skb)->count = segs;
- skb_shinfo(skb)->gso_size =
- le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
- skb_shinfo(skb)->gso_type = tpa_info->gso_type;
- payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
- RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
- RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
- if (TPA_END_GRO_TS(tpa_end))
+ if (tcp_ts)
tcp_opt_len = 12;
if (tpa_info->gso_type == SKB_GSO_TCPV4) {
@@ -1009,6 +1151,32 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt_tpa_info *tpa_info,
return skb;
}
+static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
+ struct bnxt_tpa_info *tpa_info,
+ struct rx_tpa_end_cmp *tpa_end,
+ struct rx_tpa_end_cmp_ext *tpa_end1,
+ struct sk_buff *skb)
+{
+#ifdef CONFIG_INET
+ int payload_off;
+ u16 segs;
+
+ segs = TPA_END_TPA_SEGS(tpa_end);
+ if (segs == 1)
+ return skb;
+
+ NAPI_GRO_CB(skb)->count = segs;
+ skb_shinfo(skb)->gso_size =
+ le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
+ skb_shinfo(skb)->gso_type = tpa_info->gso_type;
+ payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
+ RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
+ RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
+ skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
+#endif
+ return skb;
+}
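The per-chip dispatch through bp->gro_func is set up outside this hunk; presumably something along these lines at probe time (hedged sketch, BNXT_CHIP_NUM_57X1X is an assumed predicate):

/* Hypothetical wiring, not shown in this diff: */
if (BNXT_CHIP_NUM_57X1X(bp->chip_num))
	bp->gro_func = bnxt_gro_func_5731x;
else
	bp->gro_func = bnxt_gro_func_5730x;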
+
static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
struct bnxt_napi *bnapi,
u32 *raw_cons,
@@ -1102,19 +1270,13 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
- if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
- netdev_features_t features = skb->dev->features;
+ if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
+ (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
u16 vlan_proto = tpa_info->metadata >>
RX_CMP_FLAGS2_METADATA_TPID_SFT;
+ u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_VID_MASK;
- if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
- vlan_proto == ETH_P_8021Q) ||
- ((features & NETIF_F_HW_VLAN_STAG_RX) &&
- vlan_proto == ETH_P_8021AD)) {
- __vlan_hwaccel_put_tag(skb, htons(vlan_proto),
- tpa_info->metadata &
- RX_CMP_FLAGS2_METADATA_VID_MASK);
- }
+ __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
}
skb_checksum_none_assert(skb);
@@ -1125,7 +1287,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
}
if (TPA_END_GRO(tpa_end))
- skb = bnxt_gro_skb(tpa_info, tpa_end, tpa_end1, skb);
+ skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
return skb;
}
@@ -1267,19 +1429,14 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
skb->protocol = eth_type_trans(skb, dev);
- if (rxcmp1->rx_cmp_flags2 &
- cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) {
- netdev_features_t features = skb->dev->features;
+ if ((rxcmp1->rx_cmp_flags2 &
+ cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
+ (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
+ u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_VID_MASK;
u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
- if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
- vlan_proto == ETH_P_8021Q) ||
- ((features & NETIF_F_HW_VLAN_STAG_RX) &&
- vlan_proto == ETH_P_8021AD))
- __vlan_hwaccel_put_tag(skb, htons(vlan_proto),
- meta_data &
- RX_CMP_FLAGS2_METADATA_VID_MASK);
+ __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
}
skb_checksum_none_assert(skb);
@@ -1313,6 +1470,10 @@ next_rx_no_prod:
return rc;
}
+#define BNXT_GET_EVENT_PORT(data) \
+ ((data) & \
+ HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
+
static int bnxt_async_event_process(struct bnxt *bp,
struct hwrm_async_event_cmpl *cmpl)
{
@@ -1320,12 +1481,45 @@ static int bnxt_async_event_process(struct bnxt *bp,
/* TODO CHIMP_FW: Define event id's for link change, error etc */
switch (event_id) {
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
+ u32 data1 = le32_to_cpu(cmpl->event_data1);
+ struct bnxt_link_info *link_info = &bp->link_info;
+
+ if (BNXT_VF(bp))
+ goto async_event_process_exit;
+ if (data1 & 0x20000) {
+ u16 fw_speed = link_info->force_link_speed;
+ u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
+
+ netdev_warn(bp->dev, "Link speed %d no longer supported\n",
+ speed);
+ }
+ /* fall thru */
+ }
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
break;
case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
break;
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
+ u32 data1 = le32_to_cpu(cmpl->event_data1);
+ u16 port_id = BNXT_GET_EVENT_PORT(data1);
+
+ if (BNXT_VF(bp))
+ break;
+
+ if (bp->pf.port_id != port_id)
+ break;
+
+ set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
+ break;
+ }
+ case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
+ if (BNXT_PF(bp))
+ goto async_event_process_exit;
+ set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
+ break;
default:
netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n",
event_id);
@@ -1452,7 +1646,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
/* The valid test of the entry must be done first before
* reading any further.
*/
- rmb();
+ dma_rmb();
if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
tx_pkts++;
/* return full budget so NAPI will complete. */
@@ -1504,6 +1698,76 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
return rx_pkts;
}
+static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
+{
+ struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
+ struct bnxt *bp = bnapi->bp;
+ struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
+ struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
+ struct tx_cmp *txcmp;
+ struct rx_cmp_ext *rxcmp1;
+ u32 cp_cons, tmp_raw_cons;
+ u32 raw_cons = cpr->cp_raw_cons;
+ u32 rx_pkts = 0;
+ bool agg_event = false;
+
+ while (1) {
+ int rc;
+
+ cp_cons = RING_CMP(raw_cons);
+ txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+ if (!TX_CMP_VALID(txcmp, raw_cons))
+ break;
+
+ if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
+ tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
+ cp_cons = RING_CMP(tmp_raw_cons);
+ rxcmp1 = (struct rx_cmp_ext *)
+ &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
+
+ if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
+ break;
+
+ /* force an error to recycle the buffer */
+ rxcmp1->rx_cmp_cfa_code_errors_v2 |=
+ cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
+
+ rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
+ if (likely(rc == -EIO))
+ rx_pkts++;
+ else if (rc == -EBUSY) /* partial completion */
+ break;
+ } else if (unlikely(TX_CMP_TYPE(txcmp) ==
+ CMPL_BASE_TYPE_HWRM_DONE)) {
+ bnxt_hwrm_handler(bp, txcmp);
+ } else {
+ netdev_err(bp->dev,
+ "Invalid completion received on special ring\n");
+ }
+ raw_cons = NEXT_RAW_CMP(raw_cons);
+
+ if (rx_pkts == budget)
+ break;
+ }
+
+ cpr->cp_raw_cons = raw_cons;
+ BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
+ writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
+ writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
+
+ if (agg_event) {
+ writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell);
+ writel(DB_KEY_RX | rxr->rx_agg_prod, rxr->rx_agg_doorbell);
+ }
+
+ if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
+ napi_complete(napi);
+ BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
+ }
+ return rx_pkts;
+}
+
static int bnxt_poll(struct napi_struct *napi, int budget)
{
struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
@@ -2176,6 +2440,9 @@ static int bnxt_alloc_vnics(struct bnxt *bp)
num_vnics += bp->rx_nr_rings;
#endif
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+ num_vnics++;
+
bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
GFP_KERNEL);
if (!bp->vnic_info)
@@ -2193,7 +2460,8 @@ static void bnxt_init_vnics(struct bnxt *bp)
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
vnic->fw_vnic_id = INVALID_HW_RING_ID;
- vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
+ vnic->fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
+ vnic->fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
if (bp->vnic_info[i].rss_hash_key) {
@@ -2230,7 +2498,7 @@ static void bnxt_set_tpa_flags(struct bnxt *bp)
bp->flags &= ~BNXT_FLAG_TPA;
if (bp->dev->features & NETIF_F_LRO)
bp->flags |= BNXT_FLAG_LRO;
- if ((bp->dev->features & NETIF_F_GRO) && (bp->pdev->revision > 0))
+ if (bp->dev->features & NETIF_F_GRO)
bp->flags |= BNXT_FLAG_GRO;
}
@@ -2497,7 +2765,7 @@ static int bnxt_alloc_stats(struct bnxt *bp)
cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
}
- if (BNXT_PF(bp)) {
+ if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) {
bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
sizeof(struct tx_port_stats) + 1024;
@@ -2729,7 +2997,7 @@ void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
int timeout, bool silent)
{
- int i, intr_process, rc;
+ int i, intr_process, rc, tmo_count;
struct input *req = msg;
u32 *data = msg;
__le32 *resp_len, *valid;
@@ -2758,11 +3026,12 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
timeout = DFLT_HWRM_CMD_TIMEOUT;
i = 0;
+ tmo_count = timeout * 40;
if (intr_process) {
/* Wait until hwrm response cmpl interrupt is processed */
while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
- i++ < timeout) {
- usleep_range(600, 800);
+ i++ < tmo_count) {
+ usleep_range(25, 40);
}
if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
@@ -2773,30 +3042,30 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
} else {
/* Check if response len is updated */
resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
- for (i = 0; i < timeout; i++) {
+ for (i = 0; i < tmo_count; i++) {
len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
HWRM_RESP_LEN_SFT;
if (len)
break;
- usleep_range(600, 800);
+ usleep_range(25, 40);
}
- if (i >= timeout) {
+ if (i >= tmo_count) {
netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
timeout, le16_to_cpu(req->req_type),
- le16_to_cpu(req->seq_id), *resp_len);
+ le16_to_cpu(req->seq_id), len);
return -1;
}
/* Last word of resp contains valid bit */
valid = bp->hwrm_cmd_resp_addr + len - 4;
- for (i = 0; i < timeout; i++) {
+ for (i = 0; i < 5; i++) {
if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
break;
- usleep_range(600, 800);
+ udelay(1);
}
- if (i >= timeout) {
+ if (i >= 5) {
netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
timeout, le16_to_cpu(req->req_type),
le16_to_cpu(req->seq_id), len, *valid);
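The rescaled loop trades a coarse sleep for a fine one without shrinking the timeout; a back-of-the-envelope check, assuming the default HWRM timeout of 500 ms:

/* old: 500 iterations * 600-800 us  ~= 300-400 ms poll budget, with up
 *      to ~800 us of added latency on every completed command
 * new: tmo_count = 500 * 40 = 20000 iterations * 25-40 us
 *      ~= 500-800 ms budget, and a ready response is seen in ~25-40 us
 */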
@@ -2842,6 +3111,8 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
{
struct hwrm_func_drv_rgtr_input req = {0};
int i;
+ DECLARE_BITMAP(async_events_bmap, 256);
+ u32 *events = (u32 *)async_events_bmap;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
@@ -2850,11 +3121,14 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
FUNC_DRV_RGTR_REQ_ENABLES_VER |
FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
- /* TODO: current async event fwd bits are not defined and the firmware
- * only checks if it is non-zero to enable async event forwarding
- */
- req.async_event_fwd[0] |= cpu_to_le32(1);
- req.os_type = cpu_to_le16(1);
+ memset(async_events_bmap, 0, sizeof(async_events_bmap));
+ for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
+ __set_bit(bnxt_async_events_arr[i], async_events_bmap);
+
+ for (i = 0; i < 8; i++)
+ req.async_event_fwd[i] |= cpu_to_le32(events[i]);
+
+ req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
req.ver_maj = DRV_VER_MAJ;
req.ver_min = DRV_VER_MIN;
req.ver_upd = DRV_VER_UPD;
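Where one event id lands in the eight forwarding words, worked through on a little-endian machine:

/* DECLARE_BITMAP(async_events_bmap, 256) reserves 256 bits = 8 u32.
 * __set_bit(33, bmap) sets bit 33 % 32 = 1 of u32 word 33 / 32 = 1
 * (as seen through the u32 *events alias), so it is copied into
 * req.async_event_fwd[1], bit 1. The unsigned-long-to-u32 aliasing
 * would need byte-order care on big-endian.
 */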
@@ -2993,7 +3267,7 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
- req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[0];
+ req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
@@ -3030,8 +3304,10 @@ static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
- req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX |
- CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
+ req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
+ if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
+ req.flags |=
+ cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
req.enables =
cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
@@ -3138,7 +3414,7 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
struct hwrm_vnic_rss_cfg_input req = {0};
- if (vnic->fw_rss_cos_lb_ctx == INVALID_HW_RING_ID)
+ if (vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
return 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
@@ -3150,10 +3426,14 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
req.hash_type = cpu_to_le32(vnic->hash_type);
- if (vnic->flags & BNXT_VNIC_RSS_FLAG)
- max_rings = bp->rx_nr_rings;
- else
+ if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+ max_rings = bp->rx_nr_rings - 1;
+ else
+ max_rings = bp->rx_nr_rings;
+ } else {
max_rings = 1;
+ }
/* Fill the RSS indirection table with ring group ids */
for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
@@ -3166,7 +3446,7 @@ static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
req.hash_key_tbl_addr =
cpu_to_le64(vnic->rss_hash_key_dma_addr);
}
- req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
+ req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
@@ -3189,32 +3469,35 @@ static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
-static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id)
+static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
+ u16 ctx_idx)
{
struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
req.rss_cos_lb_ctx_id =
- cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx);
+ cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
- bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
+ bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
}
static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
{
- int i;
+ int i, j;
for (i = 0; i < bp->nr_vnics; i++) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
- if (vnic->fw_rss_cos_lb_ctx != INVALID_HW_RING_ID)
- bnxt_hwrm_vnic_ctx_free_one(bp, i);
+ for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
+ if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
+ bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
+ }
}
bp->rsscos_nr_ctxs = 0;
}
-static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id)
+static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
{
int rc;
struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
@@ -3227,7 +3510,7 @@ static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id)
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc)
- bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx =
+ bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
le16_to_cpu(resp->rss_cos_lb_ctx_id);
mutex_unlock(&bp->hwrm_cmd_lock);
@@ -3239,17 +3522,34 @@ static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
unsigned int ring = 0, grp_idx;
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
struct hwrm_vnic_cfg_input req = {0};
+ u16 def_vlan = 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
+
+ req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
/* Only RSS support for now TBD: COS & LB */
- req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP |
- VNIC_CFG_REQ_ENABLES_RSS_RULE);
- req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
- req.cos_rule = cpu_to_le16(0xffff);
+ if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
+ req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
+ req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
+ VNIC_CFG_REQ_ENABLES_MRU);
+ } else {
+ req.rss_rule = cpu_to_le16(0xffff);
+ }
+
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
+ (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
+ req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
+ req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
+ } else {
+ req.cos_rule = cpu_to_le16(0xffff);
+ }
+
if (vnic->flags & BNXT_VNIC_RSS_FLAG)
ring = 0;
else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
ring = vnic_id - 1;
+ else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
+ ring = bp->rx_nr_rings - 1;
grp_idx = bp->rx_ring[ring].bnapi->index;
req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
@@ -3259,7 +3559,11 @@ static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
VLAN_HLEN);
- if (bp->flags & BNXT_FLAG_STRIP_VLAN)
+#ifdef CONFIG_BNXT_SRIOV
+ if (BNXT_VF(bp))
+ def_vlan = bp->vf.vlan;
+#endif
+ if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
@@ -3313,7 +3617,8 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
bp->grp_info[grp_idx].fw_grp_id;
}
- bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
+ bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
+ bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
if (vnic_id == 0)
req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
@@ -3746,6 +4051,9 @@ static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
if (!bp->bnapi)
return 0;
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+ return 0;
+
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
mutex_lock(&bp->hwrm_cmd_lock);
@@ -3774,9 +4082,12 @@ static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
struct hwrm_stat_ctx_alloc_input req = {0};
struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+ return 0;
+
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
- req.update_period_ms = cpu_to_le32(1000);
+ req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
mutex_lock(&bp->hwrm_cmd_lock);
for (i = 0; i < bp->cp_nr_rings; i++) {
@@ -3798,6 +4109,39 @@ static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
return 0;
}
+static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
+{
+ struct hwrm_func_qcfg_input req = {0};
+ struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
+ req.fid = cpu_to_le16(0xffff);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ goto func_qcfg_exit;
+
+#ifdef CONFIG_BNXT_SRIOV
+ if (BNXT_VF(bp)) {
+ struct bnxt_vf_info *vf = &bp->vf;
+
+ vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
+ }
+#endif
+ switch (resp->port_partition_type) {
+ case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
+ case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
+ case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
+ bp->port_partition_type = resp->port_partition_type;
+ break;
+ }
+
+func_qcfg_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
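Tracing the VF default-VLAN plumbing across this patch's hunks:

/* 1. bnxt_hwrm_func_qcfg() caches resp->vlan into bp->vf.vlan.
 * 2. bnxt_hwrm_vnic_cfg() then forces VLAN_STRIP_MODE whenever a
 *    default VLAN exists, independent of BNXT_FLAG_STRIP_VLAN.
 * 3. bnxt_fix_features() hides RX VLAN acceleration on such a VF,
 *    presumably so the transparently inserted tag stays invisible.
 */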
+
int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
int rc = 0;
@@ -3817,7 +4161,8 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
pf->fw_fid = le16_to_cpu(resp->fid);
pf->port_id = le16_to_cpu(resp->port_id);
- memcpy(pf->mac_addr, resp->perm_mac_address, ETH_ALEN);
+ bp->dev->dev_port = pf->port_id;
+ memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
memcpy(bp->dev->dev_addr, pf->mac_addr, ETH_ALEN);
pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
@@ -3842,7 +4187,7 @@ int bnxt_hwrm_func_qcaps(struct bnxt *bp)
struct bnxt_vf_info *vf = &bp->vf;
vf->fw_fid = le16_to_cpu(resp->fid);
- memcpy(vf->mac_addr, resp->perm_mac_address, ETH_ALEN);
+ memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
if (is_valid_ether_addr(vf->mac_addr))
/* overwrite netdev dev_adr with admin VF MAC */
memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
@@ -3933,6 +4278,8 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
+ bp->hwrm_spec_code = resp->hwrm_intf_maj << 16 |
+ resp->hwrm_intf_min << 8 | resp->hwrm_intf_upd;
if (resp->hwrm_intf_maj < 1) {
netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
resp->hwrm_intf_maj, resp->hwrm_intf_min,
@@ -3950,6 +4297,11 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp)
if (resp->hwrm_intf_maj >= 1)
bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
+ bp->chip_num = le16_to_cpu(resp->chip_num);
+ if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
+ !resp->chip_metal)
+ bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
+
hwrm_ver_get_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
@@ -4038,7 +4390,7 @@ static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
int rc;
/* allocate context for vnic */
- rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id);
+ rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
vnic_id, rc);
@@ -4046,6 +4398,16 @@ static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
}
bp->rsscos_nr_ctxs++;
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
+ rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
+ vnic_id, rc);
+ goto vnic_setup_err;
+ }
+ bp->rsscos_nr_ctxs++;
+ }
+
/* configure default vnic, ring grp */
rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
if (rc) {
@@ -4103,6 +4465,36 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
#endif
}
+/* Allow PF and VF with default VLAN to be in promiscuous mode */
+static bool bnxt_promisc_ok(struct bnxt *bp)
+{
+#ifdef CONFIG_BNXT_SRIOV
+ if (BNXT_VF(bp) && !bp->vf.vlan)
+ return false;
+#endif
+ return true;
+}
+
+static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
+{
+ unsigned int rc = 0;
+
+ rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
+ if (rc) {
+ netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
+ rc);
+ return rc;
+ }
+
+ rc = bnxt_hwrm_vnic_cfg(bp, 1);
+ if (rc) {
+ netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
+ rc);
+ return rc;
+ }
+ return rc;
+}
+
static int bnxt_cfg_rx_mode(struct bnxt *);
static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
@@ -4110,6 +4502,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
{
struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
int rc = 0;
+ unsigned int rx_nr_rings = bp->rx_nr_rings;
if (irq_re_init) {
rc = bnxt_hwrm_stat_ctx_alloc(bp);
@@ -4132,8 +4525,11 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
goto err_out;
}
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+ rx_nr_rings--;
+
/* default vnic 0 */
- rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, bp->rx_nr_rings);
+ rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
if (rc) {
netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
goto err_out;
@@ -4168,7 +4564,7 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
- if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp))
+ if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
if (bp->dev->flags & IFF_ALLMULTI) {
@@ -4188,7 +4584,19 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
rc = bnxt_hwrm_set_coal(bp);
if (rc)
netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
- rc);
+ rc);
+
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
+ rc = bnxt_setup_nitroa0_vnic(bp);
+ if (rc)
+ netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
+ rc);
+ }
+
+ if (BNXT_VF(bp)) {
+ bnxt_hwrm_func_qcfg(bp);
+ netdev_update_features(bp->dev);
+ }
return 0;
@@ -4492,14 +4900,23 @@ static void bnxt_del_napi(struct bnxt *bp)
static void bnxt_init_napi(struct bnxt *bp)
{
int i;
+ unsigned int cp_nr_rings = bp->cp_nr_rings;
struct bnxt_napi *bnapi;
if (bp->flags & BNXT_FLAG_USING_MSIX) {
- for (i = 0; i < bp->cp_nr_rings; i++) {
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+ cp_nr_rings--;
+ for (i = 0; i < cp_nr_rings; i++) {
bnapi = bp->bnapi[i];
netif_napi_add(bp->dev, &bnapi->napi,
bnxt_poll, 64);
}
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
+ bnapi = bp->bnapi[cp_nr_rings];
+ netif_napi_add(bp->dev, &bnapi->napi,
+ bnxt_poll_nitroa0, 64);
+ napi_hash_add(&bnapi->napi);
+ }
} else {
bnapi = bp->bnapi[0];
netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
@@ -4540,9 +4957,7 @@ static void bnxt_tx_disable(struct bnxt *bp)
for (i = 0; i < bp->tx_nr_rings; i++) {
txr = &bp->tx_ring[i];
txq = netdev_get_tx_queue(bp->dev, i);
- __netif_tx_lock(txq, smp_processor_id());
txr->dev_state = BNXT_DEV_STATE_CLOSING;
- __netif_tx_unlock(txq);
}
}
/* Stop all TX queues */
@@ -4589,12 +5004,52 @@ static void bnxt_report_link(struct bnxt *bp)
speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
speed, duplex, flow_ctrl);
+ if (bp->flags & BNXT_FLAG_EEE_CAP)
+ netdev_info(bp->dev, "EEE is %s\n",
+ bp->eee.eee_active ? "active" :
+ "not active");
} else {
netif_carrier_off(bp->dev);
netdev_err(bp->dev, "NIC Link is Down\n");
}
}
+static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
+{
+ int rc = 0;
+ struct hwrm_port_phy_qcaps_input req = {0};
+ struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ struct bnxt_link_info *link_info = &bp->link_info;
+
+ if (bp->hwrm_spec_code < 0x10201)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
+
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ goto hwrm_phy_qcaps_exit;
+
+ if (resp->eee_supported & PORT_PHY_QCAPS_RESP_EEE_SUPPORTED) {
+ struct ethtool_eee *eee = &bp->eee;
+ u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
+
+ bp->flags |= BNXT_FLAG_EEE_CAP;
+ eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
+ bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
+ PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
+ bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
+ PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
+ }
+ link_info->support_auto_speeds =
+ le16_to_cpu(resp->supported_speeds_auto_mode);
+
+hwrm_phy_qcaps_exit:
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
{
int rc = 0;
@@ -4626,7 +5081,6 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
else
link_info->link_speed = 0;
link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
- link_info->auto_link_speed = le16_to_cpu(resp->auto_link_speed);
link_info->support_speeds = le16_to_cpu(resp->support_speeds);
link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
link_info->lp_auto_link_speeds =
@@ -4636,9 +5090,47 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
link_info->phy_ver[1] = resp->phy_min;
link_info->phy_ver[2] = resp->phy_bld;
link_info->media_type = resp->media_type;
- link_info->transceiver = resp->transceiver_type;
- link_info->phy_addr = resp->phy_addr;
+ link_info->phy_type = resp->phy_type;
+ link_info->transceiver = resp->xcvr_pkg_type;
+ link_info->phy_addr = resp->eee_config_phy_addr &
+ PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
+ link_info->module_status = resp->module_status;
+
+ if (bp->flags & BNXT_FLAG_EEE_CAP) {
+ struct ethtool_eee *eee = &bp->eee;
+ u16 fw_speeds;
+
+ eee->eee_active = 0;
+ if (resp->eee_config_phy_addr &
+ PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
+ eee->eee_active = 1;
+ fw_speeds = le16_to_cpu(
+ resp->link_partner_adv_eee_link_speed_mask);
+ eee->lp_advertised =
+ _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
+ }
+
+ /* Pull initial EEE config */
+ if (!chng_link_state) {
+ if (resp->eee_config_phy_addr &
+ PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
+ eee->eee_enabled = 1;
+
+ fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
+ eee->advertised =
+ _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
+ if (resp->eee_config_phy_addr &
+ PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
+ __le32 tmr;
+
+ eee->tx_lpi_enabled = 1;
+ tmr = resp->xcvr_identifier_type_tx_lpi_timer;
+ eee->tx_lpi_timer = le32_to_cpu(tmr) &
+ PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
+ }
+ }
+ }
/* TODO: need to add more logic to report VF link */
if (chng_link_state) {
if (link_info->phy_link_status == BNXT_LINK_LINK)
@@ -4655,10 +5147,40 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
return 0;
}
+static void bnxt_get_port_module_status(struct bnxt *bp)
+{
+ struct bnxt_link_info *link_info = &bp->link_info;
+ struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
+ u8 module_status;
+
+ if (bnxt_update_link(bp, true))
+ return;
+
+ module_status = link_info->module_status;
+ switch (module_status) {
+ case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
+ case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
+ case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
+ netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
+ bp->pf.port_id);
+ if (bp->hwrm_spec_code >= 0x10201) {
+ netdev_warn(bp->dev, "Module part number %s\n",
+ resp->phy_vendor_partnumber);
+ }
+ if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
+ netdev_warn(bp->dev, "TX is disabled\n");
+ if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
+ netdev_warn(bp->dev, "SFP+ module is shutdown\n");
+ }
+}
+
static void
bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
{
if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
+ if (bp->hwrm_spec_code >= 0x10201)
+ req->auto_pause =
+ PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
@@ -4672,6 +5194,11 @@ bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
req->enables |=
cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
+ if (bp->hwrm_spec_code >= 0x10201) {
+ req->auto_pause = req->force_pause;
+ req->enables |= cpu_to_le32(
+ PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
+ }
}
}
@@ -4684,7 +5211,7 @@ static void bnxt_hwrm_set_link_common(struct bnxt *bp,
if (autoneg & BNXT_AUTONEG_SPEED) {
req->auto_mode |=
- PORT_PHY_CFG_REQ_AUTO_MODE_MASK;
+ PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
req->enables |= cpu_to_le32(
PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
@@ -4698,9 +5225,6 @@ static void bnxt_hwrm_set_link_common(struct bnxt *bp,
req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
}
- /* currently don't support half duplex */
- req->auto_duplex = PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL;
- req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX);
/* tell chimp that the setting takes effect immediately */
req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
}
@@ -4735,7 +5259,30 @@ int bnxt_hwrm_set_pause(struct bnxt *bp)
return rc;
}
-int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause)
+static void bnxt_hwrm_set_eee(struct bnxt *bp,
+ struct hwrm_port_phy_cfg_input *req)
+{
+ struct ethtool_eee *eee = &bp->eee;
+
+ if (eee->eee_enabled) {
+ u16 eee_speeds;
+ u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
+
+ if (eee->tx_lpi_enabled)
+ flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
+ else
+ flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
+
+ req->flags |= cpu_to_le32(flags);
+ eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
+ req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
+ req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
+ } else {
+ req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
+ }
+}
+
+int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
{
struct hwrm_port_phy_cfg_input req = {0};
@@ -4744,14 +5291,57 @@ int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause)
bnxt_hwrm_set_pause_common(bp, &req);
bnxt_hwrm_set_link_common(bp, &req);
+
+ if (set_eee)
+ bnxt_hwrm_set_eee(bp, &req);
+ return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
+static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
+{
+ struct hwrm_port_phy_cfg_input req = {0};
+
+ if (!BNXT_SINGLE_PF(bp))
+ return 0;
+
+ if (pci_num_vf(bp->pdev))
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
+ req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DOWN);
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
+static bool bnxt_eee_config_ok(struct bnxt *bp)
+{
+ struct ethtool_eee *eee = &bp->eee;
+ struct bnxt_link_info *link_info = &bp->link_info;
+
+ if (!(bp->flags & BNXT_FLAG_EEE_CAP))
+ return true;
+
+ if (eee->eee_enabled) {
+ u32 advertising =
+ _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
+
+ if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
+ eee->eee_enabled = 0;
+ return false;
+ }
+ if (eee->advertised & ~advertising) {
+ eee->advertised = advertising & eee->supported;
+ return false;
+ }
+ }
+ return true;
+}
+
static int bnxt_update_phy_setting(struct bnxt *bp)
{
int rc;
bool update_link = false;
bool update_pause = false;
+ bool update_eee = false;
struct bnxt_link_info *link_info = &bp->link_info;
rc = bnxt_update_link(bp, true);
@@ -4761,7 +5351,8 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
return rc;
}
if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
- link_info->auto_pause_setting != link_info->req_flow_ctrl)
+ (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
+ link_info->req_flow_ctrl)
update_pause = true;
if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
link_info->force_pause_setting != link_info->req_flow_ctrl)
@@ -4780,8 +5371,11 @@ static int bnxt_update_phy_setting(struct bnxt *bp)
update_link = true;
}
+ if (!bnxt_eee_config_ok(bp))
+ update_eee = true;
+
if (update_link)
- rc = bnxt_hwrm_set_link_setting(bp, update_pause);
+ rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
else if (update_pause)
rc = bnxt_hwrm_set_pause(bp);
if (rc) {
@@ -4857,22 +5451,16 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
netdev_warn(bp->dev, "failed to update phy settings\n");
}
- if (irq_re_init) {
-#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE)
- vxlan_get_rx_port(bp->dev);
-#endif
- if (!bnxt_hwrm_tunnel_dst_port_alloc(
- bp, htons(0x17c1),
- TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE))
- bp->nge_port_cnt = 1;
- }
+ if (irq_re_init)
+ udp_tunnel_get_rx_info(bp->dev);
set_bit(BNXT_STATE_OPEN, &bp->state);
bnxt_enable_int(bp);
/* Enable TX queues */
bnxt_tx_enable(bp);
mod_timer(&bp->timer, jiffies + bp->current_interval);
- bnxt_update_link(bp, true);
+ /* Poll link status and check for SFP+ module status */
+ bnxt_get_port_module_status(bp);
return 0;
@@ -4905,12 +5493,19 @@ static int bnxt_open(struct net_device *dev)
struct bnxt *bp = netdev_priv(dev);
int rc = 0;
- rc = bnxt_hwrm_func_reset(bp);
- if (rc) {
- netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n",
- rc);
- rc = -1;
- return rc;
+ if (!test_bit(BNXT_STATE_FN_RST_DONE, &bp->state)) {
+ rc = bnxt_hwrm_func_reset(bp);
+ if (rc) {
+ netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n",
+ rc);
+ rc = -EBUSY;
+ return rc;
+ }
+ /* Do func_reset during the 1st PF open only to prevent killing
+ * the VFs when the PF is brought down and up.
+ */
+ if (BNXT_PF(bp))
+ set_bit(BNXT_STATE_FN_RST_DONE, &bp->state);
}
return __bnxt_open_nic(bp, true, true);
}
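A sequencing example of what BNXT_STATE_FN_RST_DONE buys (scenario per the comment above):

/* PF lifecycle with SR-IOV in use:
 *   1st bnxt_open():  func_reset issued, FN_RST_DONE set
 *   VFs enabled
 *   PF ifdown + ifup: flag already set -> no func_reset, so the VFs'
 *                     firmware resources survive the PF cycle
 */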
@@ -4972,6 +5567,7 @@ static int bnxt_close(struct net_device *dev)
struct bnxt *bp = netdev_priv(dev);
bnxt_close_nic(bp, true, true);
+ bnxt_hwrm_shutdown_link(bp);
return 0;
}
@@ -5129,8 +5725,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
- /* Only allow PF to be in promiscuous mode */
- if ((dev->flags & IFF_PROMISC) && BNXT_PF(bp))
+ if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
uc_update = bnxt_uc_list_updated(bp);
@@ -5222,8 +5817,12 @@ static bool bnxt_rfs_capable(struct bnxt *bp)
return false;
vnics = 1 + bp->rx_nr_rings;
- if (vnics > pf->max_rsscos_ctxs || vnics > pf->max_vnics)
+ if (vnics > pf->max_rsscos_ctxs || vnics > pf->max_vnics) {
+ netdev_warn(bp->dev,
+ "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
+ min(pf->max_rsscos_ctxs - 1, pf->max_vnics - 1));
return false;
+ }
return true;
#else
@@ -5236,8 +5835,29 @@ static netdev_features_t bnxt_fix_features(struct net_device *dev,
{
struct bnxt *bp = netdev_priv(dev);
- if (!bnxt_rfs_capable(bp))
+ if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
features &= ~NETIF_F_NTUPLE;
+
+ /* Both CTAG and STAG VLAN acceleration on the RX side have to be
+ * turned on or off together.
+ */
+ if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
+ (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
+ if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_RX);
+ else
+ features |= NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_RX;
+ }
+#ifdef CONFIG_BNXT_SRIOV
+ if (BNXT_VF(bp)) {
+ if (bp->vf.vlan) {
+ features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_STAG_RX);
+ }
+ }
+#endif
return features;
}
@@ -5251,7 +5871,7 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
bool update_tpa = false;
flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
- if ((features & NETIF_F_GRO) && (bp->pdev->revision > 0))
+ if ((features & NETIF_F_GRO) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
flags |= BNXT_FLAG_GRO;
if (features & NETIF_F_LRO)
flags |= BNXT_FLAG_LRO;
@@ -5353,9 +5973,10 @@ static void bnxt_dbg_dump_states(struct bnxt *bp)
}
}
-static void bnxt_reset_task(struct bnxt *bp)
+static void bnxt_reset_task(struct bnxt *bp, bool silent)
{
- bnxt_dbg_dump_states(bp);
+ if (!silent)
+ bnxt_dbg_dump_states(bp);
if (netif_running(bp->dev)) {
bnxt_close_nic(bp, false, false);
bnxt_open_nic(bp, false, false);
@@ -5406,6 +6027,23 @@ bnxt_restart_timer:
mod_timer(&bp->timer, jiffies + bp->current_interval);
}
+/* Only called from bnxt_sp_task() */
+static void bnxt_reset(struct bnxt *bp, bool silent)
+{
+ /* bnxt_reset_task() calls bnxt_close_nic() which waits
+ * for BNXT_STATE_IN_SP_TASK to clear.
+ * If there is a parallel dev_close(), bnxt_close() may be holding
+ * the rtnl lock and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
+ * must clear BNXT_STATE_IN_SP_TASK before taking the rtnl lock.
+ */
+ clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+ rtnl_lock();
+ if (test_bit(BNXT_STATE_OPEN, &bp->state))
+ bnxt_reset_task(bp, silent);
+ set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+ rtnl_unlock();
+}
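
The ordering in bnxt_reset() is the whole point of the helper: a parallel bnxt_close() can hold the rtnl lock while spinning on BNXT_STATE_IN_SP_TASK, so the sp task must drop its own flag before it tries to take rtnl. A self-contained pthread sketch of the same hand-off, with a mutex standing in for rtnl and an atomic flag for the state bit (illustrative only; build with -lpthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;
static atomic_bool in_sp_task = true;

static void *closer(void *arg)		/* models a parallel dev_close() */
{
	(void)arg;
	pthread_mutex_lock(&rtnl);
	while (atomic_load(&in_sp_task))	/* waits for the sp task */
		usleep(1000);
	puts("close done");
	pthread_mutex_unlock(&rtnl);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, closer, NULL);
	usleep(10000);			/* let closer grab "rtnl" first */
	/* sp task side: clear the flag *before* taking the lock; doing
	 * it the other way around would deadlock both threads.
	 */
	atomic_store(&in_sp_task, false);
	pthread_mutex_lock(&rtnl);
	puts("reset done");
	pthread_mutex_unlock(&rtnl);
	pthread_join(t, NULL);
	return 0;
}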
+
static void bnxt_cfg_ntp_filters(struct bnxt *);
static void bnxt_sp_task(struct work_struct *work)
@@ -5442,16 +6080,23 @@ static void bnxt_sp_task(struct work_struct *work)
bnxt_hwrm_tunnel_dst_port_free(
bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
}
- if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) {
- /* bnxt_reset_task() calls bnxt_close_nic() which waits
- * for BNXT_STATE_IN_SP_TASK to clear.
- */
- clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
- rtnl_lock();
- bnxt_reset_task(bp);
- set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
- rtnl_unlock();
+ if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
+ bnxt_hwrm_tunnel_dst_port_alloc(
+ bp, bp->nge_port,
+ TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
+ }
+ if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
+ bnxt_hwrm_tunnel_dst_port_free(
+ bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
}
+ if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
+ bnxt_reset(bp, false);
+
+ if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
+ bnxt_reset(bp, true);
+
+ if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event))
+ bnxt_get_port_module_status(bp);
if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
bnxt_hwrm_port_qstats(bp);
@@ -5539,6 +6184,8 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->tx_coal_ticks_irq = 2;
bp->tx_coal_bufs_irq = 2;
+ bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
+
init_timer(&bp->timer);
bp->timer.data = (unsigned long)bp;
bp->timer.function = bnxt_timer;
@@ -5583,10 +6230,9 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
-#ifdef CONFIG_BNXT_SRIOV
- if (BNXT_VF(bp) && is_valid_ether_addr(bp->vf.mac_addr))
- return -EADDRNOTAVAIL;
-#endif
+ rc = bnxt_approve_mac(bp, addr->sa_data);
+ if (rc)
+ return rc;
if (ether_addr_equal(addr->sa_data, dev->dev_addr))
return 0;
@@ -5605,7 +6251,7 @@ static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
{
struct bnxt *bp = netdev_priv(dev);
- if (new_mtu < 60 || new_mtu > 9000)
+ if (new_mtu < 60 || new_mtu > 9500)
return -EINVAL;
if (netif_running(dev))
@@ -5684,7 +6330,8 @@ static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
keys1->ports.ports == keys2->ports.ports &&
keys1->basic.ip_proto == keys2->basic.ip_proto &&
keys1->basic.n_proto == keys2->basic.n_proto &&
- ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr))
+ ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
+ ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
return true;
return false;
@@ -5697,12 +6344,28 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
struct bnxt_ntuple_filter *fltr, *new_fltr;
struct flow_keys *fkeys;
struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
- int rc = 0, idx, bit_id;
+ int rc = 0, idx, bit_id, l2_idx = 0;
struct hlist_head *head;
if (skb->encapsulation)
return -EPROTONOSUPPORT;
+ if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
+ int off = 0, j;
+
+ netif_addr_lock_bh(dev);
+ for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
+ if (ether_addr_equal(eth->h_dest,
+ vnic->uc_list + off)) {
+ l2_idx = j + 1;
+ break;
+ }
+ }
+ netif_addr_unlock_bh(dev);
+ if (!l2_idx)
+ return -EINVAL;
+ }
new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
if (!new_fltr)
return -ENOMEM;
@@ -5720,6 +6383,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
goto err_free;
}
+ memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
@@ -5745,6 +6409,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
new_fltr->sw_id = (u16)bit_id;
new_fltr->flow_id = flow_id;
+ new_fltr->l2_fltr_idx = l2_idx;
new_fltr->rxq = rxq_index;
hlist_add_head_rcu(&new_fltr->hash, head);
bp->ntp_fltr_count++;
@@ -5814,47 +6479,83 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
#endif /* CONFIG_RFS_ACCEL */
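
For flows whose destination MAC is not dev->dev_addr, bnxt_rx_flow_steer() above resolves the matching unicast filter slot (l2_idx) by walking the default VNIC's uc_list in ETH_ALEN strides, with slot numbering starting at 1. A minimal sketch of that scan over an invented address table:

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* returns the 1-based slot of mac in list, or 0 if absent */
static int find_l2_idx(const unsigned char *list, int count,
		       const unsigned char *mac)
{
	int j, off;

	for (j = 0, off = 0; j < count; j++, off += ETH_ALEN)
		if (!memcmp(mac, list + off, ETH_ALEN))
			return j + 1;
	return 0;
}

int main(void)
{
	unsigned char uc_list[2 * ETH_ALEN] = {
		0x02, 0x00, 0x00, 0x00, 0x00, 0x01,
		0x02, 0x00, 0x00, 0x00, 0x00, 0x02,
	};
	unsigned char wanted[ETH_ALEN] = {
		0x02, 0x00, 0x00, 0x00, 0x00, 0x02,
	};

	printf("l2_idx = %d\n", find_l2_idx(uc_list, 2, wanted)); /* 2 */
	return 0;
}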
-static void bnxt_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
- __be16 port)
+static void bnxt_udp_tunnel_add(struct net_device *dev,
+ struct udp_tunnel_info *ti)
{
struct bnxt *bp = netdev_priv(dev);
- if (!netif_running(dev))
+ if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
return;
- if (sa_family != AF_INET6 && sa_family != AF_INET)
+ if (!netif_running(dev))
return;
- if (bp->vxlan_port_cnt && bp->vxlan_port != port)
- return;
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
+ return;
- bp->vxlan_port_cnt++;
- if (bp->vxlan_port_cnt == 1) {
- bp->vxlan_port = port;
- set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
- schedule_work(&bp->sp_task);
+ bp->vxlan_port_cnt++;
+ if (bp->vxlan_port_cnt == 1) {
+ bp->vxlan_port = ti->port;
+ set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
+ schedule_work(&bp->sp_task);
+ }
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ if (bp->nge_port_cnt && bp->nge_port != ti->port)
+ return;
+
+ bp->nge_port_cnt++;
+ if (bp->nge_port_cnt == 1) {
+ bp->nge_port = ti->port;
+ set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
+ }
+ break;
+ default:
+ return;
}
+
+ schedule_work(&bp->sp_task);
}
-static void bnxt_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
- __be16 port)
+static void bnxt_udp_tunnel_del(struct net_device *dev,
+ struct udp_tunnel_info *ti)
{
struct bnxt *bp = netdev_priv(dev);
- if (!netif_running(dev))
+ if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
return;
- if (sa_family != AF_INET6 && sa_family != AF_INET)
+ if (!netif_running(dev))
return;
- if (bp->vxlan_port_cnt && bp->vxlan_port == port) {
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
+ return;
bp->vxlan_port_cnt--;
- if (bp->vxlan_port_cnt == 0) {
- set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
- schedule_work(&bp->sp_task);
- }
+ if (bp->vxlan_port_cnt != 0)
+ return;
+
+ set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ if (!bp->nge_port_cnt || bp->nge_port != ti->port)
+ return;
+ bp->nge_port_cnt--;
+
+ if (bp->nge_port_cnt != 0)
+ return;
+
+ set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
+ break;
+ default:
+ return;
}
+
+ schedule_work(&bp->sp_task);
}
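
Both tunnel callbacks share one pattern: a single offloaded UDP port per tunnel type, refcounted across stacked users, with the actual HWRM alloc/free deferred to sp_task through an event bit. A small sketch of the refcount rule for one tunnel type (names and the scheduling printf are stand-ins):

#include <stdio.h>

static unsigned short fw_port;	/* port currently offloaded, 0 = none */
static int port_cnt;

static void tunnel_add(unsigned short port)
{
	if (port_cnt && fw_port != port)
		return;			/* only one port can be offloaded */
	if (++port_cnt == 1) {
		fw_port = port;
		printf("schedule ADD_PORT work for %u\n", port);
	}
}

static void tunnel_del(unsigned short port)
{
	if (!port_cnt || fw_port != port)
		return;
	if (--port_cnt == 0) {
		printf("schedule DEL_PORT work for %u\n", port);
		fw_port = 0;
	}
}

int main(void)
{
	tunnel_add(4789);	/* first user: work scheduled */
	tunnel_add(4789);	/* second user: refcount only */
	tunnel_del(4789);	/* one user left: nothing happens */
	tunnel_del(4789);	/* last user gone: free scheduled */
	return 0;
}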
static const struct net_device_ops bnxt_netdev_ops = {
@@ -5885,8 +6586,8 @@ static const struct net_device_ops bnxt_netdev_ops = {
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = bnxt_rx_flow_steer,
#endif
- .ndo_add_vxlan_port = bnxt_add_vxlan_port,
- .ndo_del_vxlan_port = bnxt_del_vxlan_port,
+ .ndo_udp_tunnel_add = bnxt_udp_tunnel_add,
+ .ndo_udp_tunnel_del = bnxt_udp_tunnel_del,
#ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = bnxt_busy_poll,
#endif
@@ -5921,6 +6622,13 @@ static int bnxt_probe_phy(struct bnxt *bp)
int rc = 0;
struct bnxt_link_info *link_info = &bp->link_info;
+ rc = bnxt_hwrm_phy_qcaps(bp);
+ if (rc) {
+ netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
+ rc);
+ return rc;
+ }
+
rc = bnxt_update_link(bp, false);
if (rc) {
netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
@@ -5928,17 +6636,32 @@ static int bnxt_probe_phy(struct bnxt *bp)
return rc;
}
+ /* Older firmware does not have supported_auto_speeds, so assume
+ * that all supported speeds can be autonegotiated.
+ */
+ if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
+ link_info->support_auto_speeds = link_info->support_speeds;
+
/* initialize the ethtool setting copy with NVM settings */
if (BNXT_AUTO_MODE(link_info->auto_mode)) {
- link_info->autoneg = BNXT_AUTONEG_SPEED |
- BNXT_AUTONEG_FLOW_CTRL;
+ link_info->autoneg = BNXT_AUTONEG_SPEED;
+ if (bp->hwrm_spec_code >= 0x10201) {
+ if (link_info->auto_pause_setting &
+ PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
+ link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
+ } else {
+ link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
+ }
link_info->advertising = link_info->auto_link_speeds;
- link_info->req_flow_ctrl = link_info->auto_pause_setting;
} else {
link_info->req_link_speed = link_info->force_link_speed;
link_info->req_duplex = link_info->duplex_setting;
- link_info->req_flow_ctrl = link_info->force_pause_setting;
}
+ if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
+ link_info->req_flow_ctrl =
+ link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
+ else
+ link_info->req_flow_ctrl = link_info->force_pause_setting;
return rc;
}
@@ -5974,7 +6697,10 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
*max_cp = min_t(int, *max_cp, bp->pf.max_stat_ctxs);
max_ring_grps = bp->pf.max_hw_ring_grps;
}
-
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
+ *max_cp -= 1;
+ *max_rx -= 2;
+ }
if (bp->flags & BNXT_FLAG_AGG_RINGS)
*max_rx >>= 1;
*max_rx = min_t(int, *max_rx, max_ring_grps);
@@ -6010,9 +6736,29 @@ static int bnxt_set_dflt_rings(struct bnxt *bp)
bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
bp->tx_nr_rings + bp->rx_nr_rings;
bp->num_stat_ctxs = bp->cp_nr_rings;
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
+ bp->rx_nr_rings++;
+ bp->cp_nr_rings++;
+ }
return rc;
}
+static void bnxt_parse_log_pcie_link(struct bnxt *bp)
+{
+ enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
+ enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
+
+ if (pcie_get_minimum_link(bp->pdev, &speed, &width) ||
+ speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
+ netdev_info(bp->dev, "Failed to determine PCIe Link Info\n");
+ else
+ netdev_info(bp->dev, "PCIe: Speed %s Width x%d\n",
+ speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
+ speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
+ speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
+ "Unknown", width);
+}
+
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int version_printed;
@@ -6020,6 +6766,9 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct bnxt *bp;
int rc, max_irqs;
+ if (pdev->device == 0x16cd && pci_is_bridge(pdev))
+ return -ENODEV;
+
if (version_printed++ == 0)
pr_info("%s", version);
@@ -6046,18 +6795,34 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
+ rc = bnxt_alloc_hwrm_resources(bp);
+ if (rc)
+ goto init_err;
+
+ mutex_init(&bp->hwrm_cmd_lock);
+ rc = bnxt_hwrm_ver_get(bp);
+ if (rc)
+ goto init_err;
+
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
- NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT |
- NETIF_F_RXHASH |
- NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO;
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
+ NETIF_F_RXCSUM | NETIF_F_GRO;
+
+ if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
+ dev->hw_features |= NETIF_F_LRO;
dev->hw_enc_features =
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
- NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
+ NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
+ dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_GRE_CSUM;
dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
@@ -6067,12 +6832,9 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
#ifdef CONFIG_BNXT_SRIOV
init_waitqueue_head(&bp->sriov_cfg_wait);
#endif
- rc = bnxt_alloc_hwrm_resources(bp);
- if (rc)
- goto init_err;
-
- mutex_init(&bp->hwrm_cmd_lock);
- bnxt_hwrm_ver_get(bp);
+ bp->gro_func = bnxt_gro_func_5730x;
+ if (BNXT_CHIP_NUM_57X1X(bp->chip_num))
+ bp->gro_func = bnxt_gro_func_5731x;
rc = bnxt_hwrm_func_drv_rgtr(bp);
if (rc)
@@ -6095,6 +6857,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto init_err;
}
+ bnxt_hwrm_func_qcfg(bp);
+
bnxt_set_tpa_flags(bp);
bnxt_set_ring_params(bp);
if (BNXT_PF(bp))
@@ -6105,7 +6869,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
#endif
bnxt_set_dflt_rings(bp);
- if (BNXT_PF(bp)) {
+ if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) {
dev->hw_features |= NETIF_F_NTUPLE;
if (bnxt_rfs_capable(bp)) {
bp->flags |= BNXT_FLAG_RFS;
@@ -6128,6 +6892,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
board_info[ent->driver_data].name,
(long)pci_resource_start(pdev, 0), dev->dev_addr);
+ bnxt_parse_log_pcie_link(bp);
+
return 0;
init_err:
@@ -6152,6 +6918,7 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
+ struct bnxt *bp = netdev_priv(netdev);
netdev_info(netdev, "PCI I/O error detected\n");
@@ -6166,6 +6933,8 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
if (netif_running(netdev))
bnxt_close(netdev);
+ /* So that func_reset will be done during slot_reset */
+ clear_bit(BNXT_STATE_FN_RST_DONE, &bp->state);
pci_disable_device(pdev);
rtnl_unlock();
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index de9d53eee3dd..23e04a6142fb 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1,6 +1,6 @@
/* Broadcom NetXtreme-C/E network driver.
*
- * Copyright (c) 2014-2015 Broadcom Corporation
+ * Copyright (c) 2014-2016 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -11,10 +11,10 @@
#define BNXT_H
#define DRV_MODULE_NAME "bnxt_en"
-#define DRV_MODULE_VERSION "1.0.0"
+#define DRV_MODULE_VERSION "1.3.0"
#define DRV_VER_MAJ 1
-#define DRV_VER_MIN 0
+#define DRV_VER_MIN 3
#define DRV_VER_UPD 0
struct tx_bd {
@@ -298,13 +298,14 @@ struct rx_tpa_start_cmp_ext {
#define RX_TPA_START_CMP_FLAGS2_L4_CS_CALC (0x1 << 1)
#define RX_TPA_START_CMP_FLAGS2_T_IP_CS_CALC (0x1 << 2)
#define RX_TPA_START_CMP_FLAGS2_T_L4_CS_CALC (0x1 << 3)
+ #define RX_TPA_START_CMP_FLAGS2_IP_TYPE (0x1 << 8)
__le32 rx_tpa_start_cmp_metadata;
__le32 rx_tpa_start_cmp_cfa_code_v2;
#define RX_TPA_START_CMP_V2 (0x1 << 0)
#define RX_TPA_START_CMP_CFA_CODE (0xffff << 16)
#define RX_TPA_START_CMPL_CFA_CODE_SHIFT 16
- __le32 rx_tpa_start_cmp_unused5;
+ __le32 rx_tpa_start_cmp_hdr_info;
};
struct rx_tpa_end_cmp {
@@ -358,7 +359,8 @@ struct rx_tpa_end_cmp {
RX_TPA_END_CMP_FLAGS_PLACEMENT_ANY_GRO)
#define TPA_END_GRO_TS(rx_tpa_end) \
- ((rx_tpa_end)->rx_tpa_end_cmp_tsdelta & cpu_to_le32(RX_TPA_END_GRO_TS))
+ (!!((rx_tpa_end)->rx_tpa_end_cmp_tsdelta & \
+ cpu_to_le32(RX_TPA_END_GRO_TS)))
struct rx_tpa_end_cmp_ext {
__le32 rx_tpa_end_cmp_dup_acks;
@@ -425,10 +427,17 @@ struct rx_tpa_end_cmp_ext {
#define MAX_TPA 64
+#if (BNXT_PAGE_SHIFT == 16)
+#define MAX_RX_PAGES 1
+#define MAX_RX_AGG_PAGES 4
+#define MAX_TX_PAGES 1
+#define MAX_CP_PAGES 8
+#else
#define MAX_RX_PAGES 8
#define MAX_RX_AGG_PAGES 32
#define MAX_TX_PAGES 8
#define MAX_CP_PAGES 64
+#endif
#define RX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct rx_bd))
#define TX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct tx_bd))
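
The #if block above shrinks the per-ring page counts when BNXT_PAGE_SHIFT is 16 (64 KB pages) so the maximum ring sizes stay in the same ballpark as with 4 KB pages. Assuming the usual 16-byte rx_bd descriptor, the arithmetic checks out as below (a standalone sanity check, not driver code):

#include <stdio.h>

int main(void)
{
	const unsigned desc_sz = 16;	/* assumed sizeof(struct rx_bd) */
	const unsigned pg4k = 4096, pg64k = 65536;

	/* 4 KB pages, MAX_RX_PAGES = 8 */
	printf("4K : %4u descs/page * 8 pages = %u\n",
	       pg4k / desc_sz, pg4k / desc_sz * 8);
	/* 64 KB pages, MAX_RX_PAGES = 1 */
	printf("64K: %4u descs/page * 1 page  = %u\n",
	       pg64k / desc_sz, pg64k / desc_sz);
	return 0;
}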
@@ -577,6 +586,19 @@ struct bnxt_tpa_info {
u32 metadata;
enum pkt_hash_types hash_type;
u32 rss_hash;
+ u32 hdr_info;
+
+#define BNXT_TPA_L4_SIZE(hdr_info) \
+ (((hdr_info) & 0xf8000000) ? ((hdr_info) >> 27) : 32)
+
+#define BNXT_TPA_INNER_L3_OFF(hdr_info) \
+ (((hdr_info) >> 18) & 0x1ff)
+
+#define BNXT_TPA_INNER_L2_OFF(hdr_info) \
+ (((hdr_info) >> 9) & 0x1ff)
+
+#define BNXT_TPA_OUTER_L3_OFF(hdr_info) \
+ ((hdr_info) & 0x1ff)
};
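
hdr_info packs four pieces of header geometry into one 32-bit word: bits 31:27 carry the L4 header size (0 meaning the 32-byte default), 26:18 the inner L3 offset, 17:9 the inner L2 offset, and 8:0 the outer L3 offset. A quick standalone decode using the same shifts, over a value invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* same field extraction as the BNXT_TPA_* macros above */
static void decode(uint32_t hdr_info)
{
	uint32_t l4  = (hdr_info & 0xf8000000u) ? hdr_info >> 27 : 32;
	uint32_t il3 = (hdr_info >> 18) & 0x1ff;
	uint32_t il2 = (hdr_info >> 9) & 0x1ff;
	uint32_t ol3 = hdr_info & 0x1ff;

	printf("l4=%u inner_l3=%u inner_l2=%u outer_l3=%u\n",
	       l4, il3, il2, ol3);
}

int main(void)
{
	/* sample: l4=20, inner L3 at 50, inner L2 at 36, outer L3 at 14 */
	decode(20u << 27 | 50u << 18 | 36u << 9 | 14u);
	decode(0);	/* an L4 size field of 0 falls back to 32 */
	return 0;
}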
struct bnxt_rx_ring_info {
@@ -673,7 +695,8 @@ struct bnxt_ring_grp_info {
struct bnxt_vnic_info {
u16 fw_vnic_id; /* returned by Chimp during alloc */
- u16 fw_rss_cos_lb_ctx;
+#define BNXT_MAX_CTX_PER_VNIC 2
+ u16 fw_rss_cos_lb_ctx[BNXT_MAX_CTX_PER_VNIC];
u16 fw_l2_ctx_id;
#define BNXT_MAX_UC_ADDRS 4
__le64 fw_l2_filter_id[BNXT_MAX_UC_ADDRS];
@@ -732,8 +755,8 @@ struct bnxt_vf_info {
struct bnxt_pf_info {
#define BNXT_FIRST_PF_FID 1
#define BNXT_FIRST_VF_FID 128
- u32 fw_fid;
- u8 port_id;
+ u16 fw_fid;
+ u16 port_id;
u8 mac_addr[ETH_ALEN];
u16 max_rsscos_ctxs;
u16 max_cp_rings;
@@ -762,10 +785,12 @@ struct bnxt_pf_info {
struct bnxt_ntuple_filter {
struct hlist_node hash;
+ u8 dst_mac_addr[ETH_ALEN];
u8 src_mac_addr[ETH_ALEN];
struct flow_keys fkeys;
__le64 filter_id;
u16 sw_id;
+ u8 l2_fltr_idx;
u16 rxq;
u32 flow_id;
unsigned long state;
@@ -774,6 +799,7 @@ struct bnxt_ntuple_filter {
};
struct bnxt_link_info {
+ u8 phy_type;
u8 media_type;
u8 transceiver;
u8 phy_addr;
@@ -803,7 +829,7 @@ struct bnxt_link_info {
#define BNXT_LINK_AUTO_ALLSPDS PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS
#define BNXT_LINK_AUTO_ONESPD PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED
#define BNXT_LINK_AUTO_ONEORBELOW PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW
-#define BNXT_LINK_AUTO_MSK PORT_PHY_QCFG_RESP_AUTO_MODE_MASK
+#define BNXT_LINK_AUTO_MSK PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK
#define PHY_VER_LEN 3
u8 phy_ver[PHY_VER_LEN];
u16 link_speed;
@@ -827,10 +853,11 @@ struct bnxt_link_info {
#define BNXT_LINK_SPEED_MSK_25GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB
#define BNXT_LINK_SPEED_MSK_40GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB
#define BNXT_LINK_SPEED_MSK_50GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB
+ u16 support_auto_speeds;
u16 lp_auto_link_speeds;
- u16 auto_link_speed;
u16 force_link_speed;
u32 preemphasis;
+ u8 module_status;
/* copy of requested setting from ethtool cmd */
u8 autoneg;
@@ -841,6 +868,7 @@ struct bnxt_link_info {
u16 req_link_speed;
u32 advertising;
bool force_link_chng;
+
/* a copy of phy_qcfg output used to report link
* info to VF
*/
@@ -864,6 +892,45 @@ struct bnxt {
void __iomem *bar2;
u32 reg_base;
+ u16 chip_num;
+#define CHIP_NUM_57301 0x16c8
+#define CHIP_NUM_57302 0x16c9
+#define CHIP_NUM_57304 0x16ca
+#define CHIP_NUM_58700 0x16cd
+#define CHIP_NUM_57402 0x16d0
+#define CHIP_NUM_57404 0x16d1
+#define CHIP_NUM_57406 0x16d2
+
+#define CHIP_NUM_57311 0x16ce
+#define CHIP_NUM_57312 0x16cf
+#define CHIP_NUM_57314 0x16df
+#define CHIP_NUM_57412 0x16d6
+#define CHIP_NUM_57414 0x16d7
+#define CHIP_NUM_57416 0x16d8
+#define CHIP_NUM_57417 0x16d9
+
+#define BNXT_CHIP_NUM_5730X(chip_num) \
+ ((chip_num) >= CHIP_NUM_57301 && \
+ (chip_num) <= CHIP_NUM_57304)
+
+#define BNXT_CHIP_NUM_5740X(chip_num) \
+ ((chip_num) >= CHIP_NUM_57402 && \
+ (chip_num) <= CHIP_NUM_57406)
+
+#define BNXT_CHIP_NUM_5731X(chip_num) \
+ ((chip_num) == CHIP_NUM_57311 || \
+ (chip_num) == CHIP_NUM_57312 || \
+ (chip_num) == CHIP_NUM_57314)
+
+#define BNXT_CHIP_NUM_5741X(chip_num) \
+ ((chip_num) >= CHIP_NUM_57412 && \
+ (chip_num) <= CHIP_NUM_57417)
+
+#define BNXT_CHIP_NUM_57X0X(chip_num) \
+ (BNXT_CHIP_NUM_5730X(chip_num) || BNXT_CHIP_NUM_5740X(chip_num))
+
+#define BNXT_CHIP_NUM_57X1X(chip_num) \
+ (BNXT_CHIP_NUM_5731X(chip_num) || BNXT_CHIP_NUM_5741X(chip_num))
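
These range tests are what later steers bp->gro_func in bnxt_init_one(): the default is the 5730x GRO handler and 57X1X parts switch to the 5731x one. A tiny standalone classifier over the same device IDs:

#include <stdio.h>

static const char *gro_family(unsigned int chip)
{
	if (chip == 0x16ce || chip == 0x16cf || chip == 0x16df ||
	    (chip >= 0x16d6 && chip <= 0x16d9))	/* 5731X / 5741X */
		return "bnxt_gro_func_5731x";
	return "bnxt_gro_func_5730x";		/* default */
}

int main(void)
{
	printf("0x16ca -> %s\n", gro_family(0x16ca));	/* 57304 */
	printf("0x16df -> %s\n", gro_family(0x16df));	/* 57314 */
	return 0;
}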
struct net_device *dev;
struct pci_dev *pdev;
@@ -890,6 +957,8 @@ struct bnxt {
#define BNXT_FLAG_RFS 0x100
#define BNXT_FLAG_SHARED_RINGS 0x200
#define BNXT_FLAG_PORT_STATS 0x400
+ #define BNXT_FLAG_EEE_CAP 0x1000
+ #define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
BNXT_FLAG_RFS | \
@@ -897,12 +966,18 @@ struct bnxt {
#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF))
#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF)
+#define BNXT_NPAR(bp) ((bp)->port_partition_type)
+#define BNXT_SINGLE_PF(bp) (BNXT_PF(bp) && !BNXT_NPAR(bp))
+#define BNXT_CHIP_TYPE_NITRO_A0(bp) ((bp)->flags & BNXT_FLAG_CHIP_NITRO_A0)
struct bnxt_napi **bnapi;
struct bnxt_rx_ring_info *rx_ring;
struct bnxt_tx_ring_info *tx_ring;
+ struct sk_buff * (*gro_func)(struct bnxt_tpa_info *, int, int,
+ struct sk_buff *);
+
u32 rx_buf_size;
u32 rx_buf_use_size; /* useable size */
u32 rx_ring_size;
@@ -949,12 +1024,14 @@ struct bnxt {
unsigned long state;
#define BNXT_STATE_OPEN 0
#define BNXT_STATE_IN_SP_TASK 1
+#define BNXT_STATE_FN_RST_DONE 2
struct bnxt_irq *irq_tbl;
u8 mac_addr[ETH_ALEN];
u32 msg_enable;
+ u32 hwrm_spec_code;
u16 hwrm_cmd_seq;
u32 hwrm_intr_seq_id;
void *hwrm_cmd_resp_addr;
@@ -980,8 +1057,10 @@ struct bnxt {
__be16 vxlan_port;
u8 vxlan_port_cnt;
__le16 vxlan_fw_dst_port_id;
+ __be16 nge_port;
u8 nge_port_cnt;
__le16 nge_fw_dst_port_id;
+ u8 port_partition_type;
u16 rx_coal_ticks;
u16 rx_coal_ticks_irq;
@@ -994,6 +1073,11 @@ struct bnxt {
#define BNXT_USEC_TO_COAL_TIMER(x) ((x) * 25 / 2)
+ u32 stats_coal_ticks;
+#define BNXT_DEF_STATS_COAL_TICKS 1000000
+#define BNXT_MIN_STATS_COAL_TICKS 250000
+#define BNXT_MAX_STATS_COAL_TICKS 1000000
+
struct work_struct sp_task;
unsigned long sp_event;
#define BNXT_RX_MASK_SP_EVENT 0
@@ -1006,6 +1090,10 @@ struct bnxt {
#define BNXT_RST_RING_SP_EVENT 7
#define BNXT_HWRM_PF_UNLOAD_SP_EVENT 8
#define BNXT_PERIODIC_STATS_SP_EVENT 9
+#define BNXT_HWRM_PORT_MODULE_SP_EVENT 10
+#define BNXT_RESET_TASK_SILENT_SP_EVENT 11
+#define BNXT_GENEVE_ADD_PORT_SP_EVENT 12
+#define BNXT_GENEVE_DEL_PORT_SP_EVENT 13
struct bnxt_pf_info pf;
#ifdef CONFIG_BNXT_SRIOV
@@ -1026,6 +1114,9 @@ struct bnxt {
int ntp_fltr_count;
struct bnxt_link_info link_info;
+ struct ethtool_eee eee;
+ u32 lpi_tmr_lo;
+ u32 lpi_tmr_hi;
};
#ifdef CONFIG_NET_RX_BUSY_POLL
@@ -1115,6 +1206,16 @@ static inline void bnxt_disable_poll(struct bnxt_napi *bnapi)
#endif
+#define I2C_DEV_ADDR_A0 0xa0
+#define I2C_DEV_ADDR_A2 0xa2
+#define SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e
+#define SFP_EEPROM_SFF_8472_COMP_SIZE 1
+#define SFF_MODULE_ID_SFP 0x3
+#define SFF_MODULE_ID_QSFP 0xc
+#define SFF_MODULE_ID_QSFP_PLUS 0xd
+#define SFF_MODULE_ID_QSFP28 0x11
+#define BNXT_MAX_PHY_I2C_RESP_SIZE 64
+
void bnxt_set_ring_params(struct bnxt *);
void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16);
int _hwrm_send_message(struct bnxt *, void *, u32, int);
@@ -1123,7 +1224,7 @@ int hwrm_send_message_silent(struct bnxt *, void *, u32, int);
int bnxt_hwrm_set_coal(struct bnxt *);
int bnxt_hwrm_func_qcaps(struct bnxt *);
int bnxt_hwrm_set_pause(struct bnxt *);
-int bnxt_hwrm_set_link_setting(struct bnxt *, bool);
+int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_close_nic(struct bnxt *, bool, bool);
int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 2e472f6dbf2d..b83e17403d6c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1,6 +1,6 @@
/* Broadcom NetXtreme-C/E network driver.
*
- * Copyright (c) 2014-2015 Broadcom Corporation
+ * Copyright (c) 2014-2016 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -56,6 +56,8 @@ static int bnxt_get_coalesce(struct net_device *dev,
coal->tx_coalesce_usecs_irq = bp->tx_coal_ticks_irq;
coal->tx_max_coalesced_frames_irq = bp->tx_coal_bufs_irq;
+ coal->stats_block_coalesce_usecs = bp->stats_coal_ticks;
+
return 0;
}
@@ -63,6 +65,7 @@ static int bnxt_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *coal)
{
struct bnxt *bp = netdev_priv(dev);
+ bool update_stats = false;
int rc = 0;
bp->rx_coal_ticks = coal->rx_coalesce_usecs;
@@ -76,8 +79,26 @@ static int bnxt_set_coalesce(struct net_device *dev,
bp->tx_coal_ticks_irq = coal->tx_coalesce_usecs_irq;
bp->tx_coal_bufs_irq = coal->tx_max_coalesced_frames_irq;
- if (netif_running(dev))
- rc = bnxt_hwrm_set_coal(bp);
+ if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
+ u32 stats_ticks = coal->stats_block_coalesce_usecs;
+
+ stats_ticks = clamp_t(u32, stats_ticks,
+ BNXT_MIN_STATS_COAL_TICKS,
+ BNXT_MAX_STATS_COAL_TICKS);
+ stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS);
+ bp->stats_coal_ticks = stats_ticks;
+ update_stats = true;
+ }
+
+ if (netif_running(dev)) {
+ if (update_stats) {
+ rc = bnxt_close_nic(bp, true, false);
+ if (!rc)
+ rc = bnxt_open_nic(bp, true, false);
+ } else {
+ rc = bnxt_hwrm_set_coal(bp);
+ }
+ }
return rc;
}
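
stats_block_coalesce_usecs is first clamped to [250000, 1000000] and then rounded down to a multiple of 250000, so only four values can actually reach the firmware; changing it also forces a close/open cycle rather than a plain coalesce update. A quick standalone check of the mapping, with clamp_t/rounddown reimplemented locally:

#include <stdio.h>

#define MIN_TICKS 250000u
#define MAX_TICKS 1000000u

static unsigned int fix_ticks(unsigned int us)
{
	if (us < MIN_TICKS)
		us = MIN_TICKS;
	if (us > MAX_TICKS)
		us = MAX_TICKS;
	return us - us % MIN_TICKS;	/* round down to 250 ms steps */
}

int main(void)
{
	unsigned int samples[] = { 0, 300000, 600000, 999999, 5000000 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%7u -> %7u\n", samples[i], fix_ticks(samples[i]));
	return 0;
}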
@@ -327,7 +348,11 @@ static void bnxt_get_channels(struct net_device *dev,
bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
channel->max_combined = max_rx_rings;
- bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false);
+ if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
+ max_rx_rings = 0;
+ max_tx_rings = 0;
+ }
+
tcs = netdev_get_num_tc(dev);
if (tcs > 1)
max_tx_rings /= tcs;
@@ -337,9 +362,13 @@ static void bnxt_get_channels(struct net_device *dev,
channel->max_other = 0;
if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
channel->combined_count = bp->rx_nr_rings;
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp))
+ channel->combined_count--;
} else {
- channel->rx_count = bp->rx_nr_rings;
- channel->tx_count = bp->tx_nr_rings_per_tc;
+ if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) {
+ channel->rx_count = bp->rx_nr_rings;
+ channel->tx_count = bp->tx_nr_rings_per_tc;
+ }
}
}
@@ -362,6 +391,10 @@ static int bnxt_set_channels(struct net_device *dev,
(channel->rx_count || channel->tx_count))
return -EINVAL;
+ if (BNXT_CHIP_TYPE_NITRO_A0(bp) && (channel->rx_count ||
+ channel->tx_count))
+ return -EINVAL;
+
if (channel->combined_count)
sh = true;
@@ -597,7 +630,7 @@ static void bnxt_get_drvinfo(struct net_device *dev,
kfree(pkglog);
}
-static u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
+u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
{
u32 speed_mask = 0;
@@ -624,7 +657,66 @@ static u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
return speed_mask;
}
-static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info)
+#define BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, name)\
+{ \
+ if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100MB) \
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+ 100baseT_Full); \
+ if ((fw_speeds) & BNXT_LINK_SPEED_MSK_1GB) \
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+ 1000baseT_Full); \
+ if ((fw_speeds) & BNXT_LINK_SPEED_MSK_10GB) \
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+ 10000baseT_Full); \
+ if ((fw_speeds) & BNXT_LINK_SPEED_MSK_25GB) \
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+ 25000baseCR_Full); \
+ if ((fw_speeds) & BNXT_LINK_SPEED_MSK_40GB) \
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+ 40000baseCR4_Full);\
+ if ((fw_speeds) & BNXT_LINK_SPEED_MSK_50GB) \
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+ 50000baseCR2_Full);\
+ if ((fw_pause) & BNXT_LINK_PAUSE_RX) { \
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+ Pause); \
+ if (!((fw_pause) & BNXT_LINK_PAUSE_TX)) \
+ ethtool_link_ksettings_add_link_mode( \
+ lk_ksettings, name, Asym_Pause);\
+ } else if ((fw_pause) & BNXT_LINK_PAUSE_TX) { \
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
+ Asym_Pause); \
+ } \
+}
+
+#define BNXT_ETHTOOL_TO_FW_SPDS(fw_speeds, lk_ksettings, name) \
+{ \
+ if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
+ 100baseT_Full) || \
+ ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
+ 100baseT_Half)) \
+ (fw_speeds) |= BNXT_LINK_SPEED_MSK_100MB; \
+ if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
+ 1000baseT_Full) || \
+ ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
+ 1000baseT_Half)) \
+ (fw_speeds) |= BNXT_LINK_SPEED_MSK_1GB; \
+ if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
+ 10000baseT_Full)) \
+ (fw_speeds) |= BNXT_LINK_SPEED_MSK_10GB; \
+ if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
+ 25000baseCR_Full)) \
+ (fw_speeds) |= BNXT_LINK_SPEED_MSK_25GB; \
+ if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
+ 40000baseCR4_Full)) \
+ (fw_speeds) |= BNXT_LINK_SPEED_MSK_40GB; \
+ if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name, \
+ 50000baseCR2_Full)) \
+ (fw_speeds) |= BNXT_LINK_SPEED_MSK_50GB; \
+}
+
+static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info,
+ struct ethtool_link_ksettings *lk_ksettings)
{
u16 fw_speeds = link_info->auto_link_speeds;
u8 fw_pause = 0;
@@ -632,10 +724,11 @@ static u32 bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info)
if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
fw_pause = link_info->auto_pause_setting;
- return _bnxt_fw_to_ethtool_adv_spds(fw_speeds, fw_pause);
+ BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, advertising);
}
-static u32 bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info)
+static void bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info,
+ struct ethtool_link_ksettings *lk_ksettings)
{
u16 fw_speeds = link_info->lp_auto_link_speeds;
u8 fw_pause = 0;
@@ -643,16 +736,24 @@ static u32 bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info)
if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
fw_pause = link_info->lp_pause;
- return _bnxt_fw_to_ethtool_adv_spds(fw_speeds, fw_pause);
+ BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings,
+ lp_advertising);
}
-static u32 bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info)
+static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info,
+ struct ethtool_link_ksettings *lk_ksettings)
{
u16 fw_speeds = link_info->support_speeds;
- u32 supported;
- supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
- return supported | SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, 0, lk_ksettings, supported);
+
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, Pause);
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
+ Asym_Pause);
+
+ if (link_info->support_auto_speeds)
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
+ Autoneg);
}
u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
@@ -679,91 +780,114 @@ u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
}
}
-static int bnxt_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int bnxt_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *lk_ksettings)
{
struct bnxt *bp = netdev_priv(dev);
struct bnxt_link_info *link_info = &bp->link_info;
- u16 ethtool_speed;
-
- cmd->supported = bnxt_fw_to_ethtool_support_spds(link_info);
+ struct ethtool_link_settings *base = &lk_ksettings->base;
+ u32 ethtool_speed;
- if (link_info->auto_link_speeds)
- cmd->supported |= SUPPORTED_Autoneg;
+ ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
+ bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings);
+ ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
if (link_info->autoneg) {
- cmd->advertising =
- bnxt_fw_to_ethtool_advertised_spds(link_info);
- cmd->advertising |= ADVERTISED_Autoneg;
- cmd->autoneg = AUTONEG_ENABLE;
+ bnxt_fw_to_ethtool_advertised_spds(link_info, lk_ksettings);
+ ethtool_link_ksettings_add_link_mode(lk_ksettings,
+ advertising, Autoneg);
+ base->autoneg = AUTONEG_ENABLE;
if (link_info->phy_link_status == BNXT_LINK_LINK)
- cmd->lp_advertising =
- bnxt_fw_to_ethtool_lp_adv(link_info);
+ bnxt_fw_to_ethtool_lp_adv(link_info, lk_ksettings);
+ ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
+ if (!netif_carrier_ok(dev))
+ base->duplex = DUPLEX_UNKNOWN;
+ else if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
+ base->duplex = DUPLEX_FULL;
+ else
+ base->duplex = DUPLEX_HALF;
} else {
- cmd->autoneg = AUTONEG_DISABLE;
- cmd->advertising = 0;
+ base->autoneg = AUTONEG_DISABLE;
+ ethtool_speed =
+ bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
+ base->duplex = DUPLEX_HALF;
+ if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
+ base->duplex = DUPLEX_FULL;
}
+ base->speed = ethtool_speed;
- cmd->port = PORT_NONE;
+ base->port = PORT_NONE;
if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
- cmd->port = PORT_TP;
- cmd->supported |= SUPPORTED_TP;
- cmd->advertising |= ADVERTISED_TP;
+ base->port = PORT_TP;
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
+ TP);
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
+ TP);
} else {
- cmd->supported |= SUPPORTED_FIBRE;
- cmd->advertising |= ADVERTISED_FIBRE;
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
+ FIBRE);
+ ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
+ FIBRE);
if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
- cmd->port = PORT_DA;
+ base->port = PORT_DA;
else if (link_info->media_type ==
PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE)
- cmd->port = PORT_FIBRE;
+ base->port = PORT_FIBRE;
}
-
- if (link_info->phy_link_status == BNXT_LINK_LINK) {
- if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
- cmd->duplex = DUPLEX_FULL;
- } else {
- cmd->duplex = DUPLEX_UNKNOWN;
- }
- ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
- ethtool_cmd_speed_set(cmd, ethtool_speed);
- if (link_info->transceiver ==
- PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_INTERNAL)
- cmd->transceiver = XCVR_INTERNAL;
- else
- cmd->transceiver = XCVR_EXTERNAL;
- cmd->phy_address = link_info->phy_addr;
+ base->phy_address = link_info->phy_addr;
return 0;
}
static u32 bnxt_get_fw_speed(struct net_device *dev, u16 ethtool_speed)
{
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_link_info *link_info = &bp->link_info;
+ u16 support_spds = link_info->support_speeds;
+ u32 fw_speed = 0;
+
switch (ethtool_speed) {
case SPEED_100:
- return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB;
+ if (support_spds & BNXT_LINK_SPEED_MSK_100MB)
+ fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB;
+ break;
case SPEED_1000:
- return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB;
+ if (support_spds & BNXT_LINK_SPEED_MSK_1GB)
+ fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB;
+ break;
case SPEED_2500:
- return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB;
+ if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB)
+ fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB;
+ break;
case SPEED_10000:
- return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB;
+ if (support_spds & BNXT_LINK_SPEED_MSK_10GB)
+ fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB;
+ break;
case SPEED_20000:
- return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB;
+ if (support_spds & BNXT_LINK_SPEED_MSK_20GB)
+ fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB;
+ break;
case SPEED_25000:
- return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB;
+ if (support_spds & BNXT_LINK_SPEED_MSK_25GB)
+ fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB;
+ break;
case SPEED_40000:
- return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB;
+ if (support_spds & BNXT_LINK_SPEED_MSK_40GB)
+ fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB;
+ break;
case SPEED_50000:
- return PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB;
+ if (support_spds & BNXT_LINK_SPEED_MSK_50GB)
+ fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB;
+ break;
default:
netdev_err(dev, "unsupported speed!\n");
break;
}
- return 0;
+ return fw_speed;
}
-static u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
+u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
{
u16 fw_speed_mask = 0;
@@ -785,37 +909,25 @@ static u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
return fw_speed_mask;
}
-static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int bnxt_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *lk_ksettings)
{
- int rc = 0;
struct bnxt *bp = netdev_priv(dev);
struct bnxt_link_info *link_info = &bp->link_info;
+ const struct ethtool_link_settings *base = &lk_ksettings->base;
u32 speed, fw_advertising = 0;
bool set_pause = false;
+ int rc = 0;
- if (BNXT_VF(bp))
- return rc;
-
- if (cmd->autoneg == AUTONEG_ENABLE) {
- u32 supported_spds = bnxt_fw_to_ethtool_support_spds(link_info);
+ if (!BNXT_SINGLE_PF(bp))
+ return -EOPNOTSUPP;
- if (cmd->advertising & ~(supported_spds | ADVERTISED_Autoneg |
- ADVERTISED_TP | ADVERTISED_FIBRE)) {
- netdev_err(dev, "Unsupported advertising mask (adv: 0x%x)\n",
- cmd->advertising);
- rc = -EINVAL;
- goto set_setting_exit;
- }
- fw_advertising = bnxt_get_fw_auto_link_speeds(cmd->advertising);
- if (fw_advertising & ~link_info->support_speeds) {
- netdev_err(dev, "Advertising parameters are not supported! (adv: 0x%x)\n",
- cmd->advertising);
- rc = -EINVAL;
- goto set_setting_exit;
- }
+ if (base->autoneg == AUTONEG_ENABLE) {
+ BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings,
+ advertising);
link_info->autoneg |= BNXT_AUTONEG_SPEED;
if (!fw_advertising)
- link_info->advertising = link_info->support_speeds;
+ link_info->advertising = link_info->support_auto_speeds;
else
link_info->advertising = fw_advertising;
/* any change to autoneg will cause link change, therefore the
@@ -823,24 +935,35 @@ static int bnxt_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
*/
set_pause = true;
} else {
- /* TODO: currently don't support half duplex */
- if (cmd->duplex == DUPLEX_HALF) {
+ u16 fw_speed;
+ u8 phy_type = link_info->phy_type;
+
+ if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET ||
+ phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE ||
+ link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
+ netdev_err(dev, "10GBase-T devices must autoneg\n");
+ rc = -EINVAL;
+ goto set_setting_exit;
+ }
+ if (base->duplex == DUPLEX_HALF) {
netdev_err(dev, "HALF DUPLEX is not supported!\n");
rc = -EINVAL;
goto set_setting_exit;
}
- /* If received a request for an unknown duplex, assume full*/
- if (cmd->duplex == DUPLEX_UNKNOWN)
- cmd->duplex = DUPLEX_FULL;
- speed = ethtool_cmd_speed(cmd);
- link_info->req_link_speed = bnxt_get_fw_speed(dev, speed);
+ speed = base->speed;
+ fw_speed = bnxt_get_fw_speed(dev, speed);
+ if (!fw_speed) {
+ rc = -EINVAL;
+ goto set_setting_exit;
+ }
+ link_info->req_link_speed = fw_speed;
link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
link_info->autoneg = 0;
link_info->advertising = 0;
}
if (netif_running(dev))
- rc = bnxt_hwrm_set_link_setting(bp, set_pause);
+ rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);
set_setting_exit:
return rc;
@@ -866,15 +989,17 @@ static int bnxt_set_pauseparam(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
struct bnxt_link_info *link_info = &bp->link_info;
- if (BNXT_VF(bp))
- return rc;
+ if (!BNXT_SINGLE_PF(bp))
+ return -EOPNOTSUPP;
if (epause->autoneg) {
if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
return -EINVAL;
link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
- link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_BOTH;
+ if (bp->hwrm_spec_code >= 0x10201)
+ link_info->req_flow_ctrl =
+ PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
} else {
/* when transition from auto pause to force pause,
* force a link change
@@ -882,17 +1007,13 @@ static int bnxt_set_pauseparam(struct net_device *dev,
if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
link_info->force_link_chng = true;
link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL;
- link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_BOTH;
+ link_info->req_flow_ctrl = 0;
}
if (epause->rx_pause)
link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX;
- else
- link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_RX;
if (epause->tx_pause)
link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;
- else
- link_info->req_flow_ctrl &= ~BNXT_LINK_PAUSE_TX;
if (netif_running(dev))
rc = bnxt_hwrm_set_pause(bp);
@@ -967,6 +1088,8 @@ static int bnxt_firmware_reset(struct net_device *dev,
case BNX_DIR_TYPE_APE_FW:
case BNX_DIR_TYPE_APE_PATCH:
req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
+ /* Self-reset APE upon next PCIe reset: */
+ req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
break;
case BNX_DIR_TYPE_KONG_FW:
case BNX_DIR_TYPE_KONG_PATCH:
@@ -1000,9 +1123,27 @@ static int bnxt_flash_firmware(struct net_device *dev,
case BNX_DIR_TYPE_BOOTCODE_2:
code_type = CODE_BOOT;
break;
+ case BNX_DIR_TYPE_CHIMP_PATCH:
+ code_type = CODE_CHIMP_PATCH;
+ break;
case BNX_DIR_TYPE_APE_FW:
code_type = CODE_MCTP_PASSTHRU;
break;
+ case BNX_DIR_TYPE_APE_PATCH:
+ code_type = CODE_APE_PATCH;
+ break;
+ case BNX_DIR_TYPE_KONG_FW:
+ code_type = CODE_KONG_FW;
+ break;
+ case BNX_DIR_TYPE_KONG_PATCH:
+ code_type = CODE_KONG_PATCH;
+ break;
+ case BNX_DIR_TYPE_BONO_FW:
+ code_type = CODE_BONO_FW;
+ break;
+ case BNX_DIR_TYPE_BONO_PATCH:
+ code_type = CODE_BONO_PATCH;
+ break;
default:
netdev_err(dev, "Unsupported directory entry type: %u\n",
dir_type);
@@ -1057,6 +1198,8 @@ static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
case BNX_DIR_TYPE_APE_PATCH:
case BNX_DIR_TYPE_KONG_FW:
case BNX_DIR_TYPE_KONG_PATCH:
+ case BNX_DIR_TYPE_BONO_FW:
+ case BNX_DIR_TYPE_BONO_PATCH:
return true;
}
@@ -1094,7 +1237,8 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev,
const struct firmware *fw;
int rc;
- if (bnxt_dir_type_is_executable(dir_type) == false)
+ if (dir_type != BNX_DIR_TYPE_UPDATE &&
+ bnxt_dir_type_is_executable(dir_type) == false)
return -EINVAL;
rc = request_firmware(&fw, filename, &dev->dev);
@@ -1381,9 +1525,202 @@ static int bnxt_set_eeprom(struct net_device *dev,
eeprom->len);
}
+static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct ethtool_eee *eee = &bp->eee;
+ struct bnxt_link_info *link_info = &bp->link_info;
+ u32 advertising =
+ _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
+ int rc = 0;
+
+ if (!BNXT_SINGLE_PF(bp))
+ return -EOPNOTSUPP;
+
+ if (!(bp->flags & BNXT_FLAG_EEE_CAP))
+ return -EOPNOTSUPP;
+
+ if (!edata->eee_enabled)
+ goto eee_ok;
+
+ if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
+ netdev_warn(dev, "EEE requires autoneg\n");
+ return -EINVAL;
+ }
+ if (edata->tx_lpi_enabled) {
+ if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi ||
+ edata->tx_lpi_timer < bp->lpi_tmr_lo)) {
+ netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n",
+ bp->lpi_tmr_lo, bp->lpi_tmr_hi);
+ return -EINVAL;
+ } else if (!bp->lpi_tmr_hi) {
+ edata->tx_lpi_timer = eee->tx_lpi_timer;
+ }
+ }
+ if (!edata->advertised) {
+ edata->advertised = advertising & eee->supported;
+ } else if (edata->advertised & ~advertising) {
+ netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n",
+ edata->advertised, advertising);
+ return -EINVAL;
+ }
+
+ eee->advertised = edata->advertised;
+ eee->tx_lpi_enabled = edata->tx_lpi_enabled;
+ eee->tx_lpi_timer = edata->tx_lpi_timer;
+eee_ok:
+ eee->eee_enabled = edata->eee_enabled;
+
+ if (netif_running(dev))
+ rc = bnxt_hwrm_set_link_setting(bp, false, true);
+
+ return rc;
+}
+
+static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
+{
+ struct bnxt *bp = netdev_priv(dev);
+
+ if (!(bp->flags & BNXT_FLAG_EEE_CAP))
+ return -EOPNOTSUPP;
+
+ *edata = bp->eee;
+ if (!bp->eee.eee_enabled) {
+ /* Preserve tx_lpi_timer so that the last value will be used
+ * by default when it is re-enabled.
+ */
+ edata->advertised = 0;
+ edata->tx_lpi_enabled = 0;
+ }
+
+ if (!bp->eee.eee_active)
+ edata->lp_advertised = 0;
+
+ return 0;
+}
+
+static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
+ u16 page_number, u16 start_addr,
+ u16 data_length, u8 *buf)
+{
+ struct hwrm_port_phy_i2c_read_input req = {0};
+ struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr;
+ int rc, byte_offset = 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1);
+ req.i2c_slave_addr = i2c_addr;
+ req.page_number = cpu_to_le16(page_number);
+ req.port_id = cpu_to_le16(bp->pf.port_id);
+ do {
+ u16 xfer_size;
+
+ xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
+ data_length -= xfer_size;
+ req.page_offset = cpu_to_le16(start_addr + byte_offset);
+ req.data_length = xfer_size;
+ req.enables = cpu_to_le32(start_addr + byte_offset ?
+ PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET : 0);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req),
+ HWRM_CMD_TIMEOUT);
+ if (!rc)
+ memcpy(buf + byte_offset, output->data, xfer_size);
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ byte_offset += xfer_size;
+ } while (!rc && data_length > 0);
+
+ return rc;
+}
+
+static int bnxt_get_module_info(struct net_device *dev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ struct hwrm_port_phy_i2c_read_input req = {0};
+ struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr;
+ int rc;
+
+ /* No point in going further if phy status indicates the
+ * module is not inserted, is powered down, or is of type
+ * 10GBase-T
+ */
+ if (bp->link_info.module_status >
+ PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
+ return -EOPNOTSUPP;
+
+ /* This feature is not supported in older firmware versions */
+ if (bp->hwrm_spec_code < 0x10202)
+ return -EOPNOTSUPP;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1);
+ req.i2c_slave_addr = I2C_DEV_ADDR_A0;
+ req.page_number = 0;
+ req.page_offset = cpu_to_le16(SFP_EEPROM_SFF_8472_COMP_ADDR);
+ req.data_length = SFP_EEPROM_SFF_8472_COMP_SIZE;
+ req.port_id = cpu_to_le16(bp->pf.port_id);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (!rc) {
+ u32 module_id = le32_to_cpu(output->data[0]);
+
+ switch (module_id) {
+ case SFF_MODULE_ID_SFP:
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ break;
+ case SFF_MODULE_ID_QSFP:
+ case SFF_MODULE_ID_QSFP_PLUS:
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ break;
+ case SFF_MODULE_ID_QSFP28:
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ break;
+ default:
+ rc = -EOPNOTSUPP;
+ break;
+ }
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+}
+
+static int bnxt_get_module_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom,
+ u8 *data)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ u16 start = eeprom->offset, length = eeprom->len;
+ int rc = 0;
+
+ memset(data, 0, eeprom->len);
+
+ /* Read A0 portion of the EEPROM */
+ if (start < ETH_MODULE_SFF_8436_LEN) {
+ if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN)
+ length = ETH_MODULE_SFF_8436_LEN - start;
+ rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0,
+ start, length, data);
+ if (rc)
+ return rc;
+ start += length;
+ data += length;
+ length = eeprom->len - length;
+ }
+
+ /* Read A2 portion of the EEPROM */
+ if (length) {
+ start -= ETH_MODULE_SFF_8436_LEN;
+ bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1, start,
+ length, data);
+ }
+ return rc;
+}
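
The module EEPROM read is split at the 256-byte SFF-8436 boundary: bytes below 256 come from I2C address 0xa0 page 0, and anything above comes from 0xa2 page 1 with the offset rebased. A standalone sketch of just that splitting arithmetic (the reader is a printf stub):

#include <stdio.h>

#define A0_LEN 256u	/* ETH_MODULE_SFF_8436_LEN */

static void read_chunk(unsigned int dev, unsigned int page,
		       unsigned int start, unsigned int len)
{
	printf("dev 0x%x page %u: start=%u len=%u\n", dev, page, start, len);
}

static void get_module_eeprom(unsigned int start, unsigned int len)
{
	if (start < A0_LEN) {
		unsigned int a0_len = len;

		if (start + len > A0_LEN)
			a0_len = A0_LEN - start;
		read_chunk(0xa0, 0, start, a0_len);
		start += a0_len;
		len -= a0_len;
	}
	if (len)		/* remainder lives on the A2 page */
		read_chunk(0xa2, 1, start - A0_LEN, len);
}

int main(void)
{
	get_module_eeprom(200, 100);	/* 56 bytes from A0, 44 from A2 */
	get_module_eeprom(300, 16);	/* entirely from A2, rebased to 44 */
	return 0;
}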
+
const struct ethtool_ops bnxt_ethtool_ops = {
- .get_settings = bnxt_get_settings,
- .set_settings = bnxt_set_settings,
+ .get_link_ksettings = bnxt_get_link_ksettings,
+ .set_link_ksettings = bnxt_set_link_ksettings,
.get_pauseparam = bnxt_get_pauseparam,
.set_pauseparam = bnxt_set_pauseparam,
.get_drvinfo = bnxt_get_drvinfo,
@@ -1409,4 +1746,8 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.get_eeprom = bnxt_get_eeprom,
.set_eeprom = bnxt_set_eeprom,
.get_link = bnxt_get_link,
+ .get_eee = bnxt_get_eee,
+ .set_eee = bnxt_set_eee,
+ .get_module_info = bnxt_get_module_info,
+ .get_module_eeprom = bnxt_get_module_eeprom,
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index 98fa81e08b58..3abc03b60dbc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -1,6 +1,6 @@
/* Broadcom NetXtreme-C/E network driver.
*
- * Copyright (c) 2014-2015 Broadcom Corporation
+ * Copyright (c) 2014-2016 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -12,6 +12,8 @@
extern const struct ethtool_ops bnxt_ethtool_ops;
+u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8);
u32 bnxt_fw_to_ethtool_speed(u16);
+u16 bnxt_get_fw_auto_link_speeds(u32);
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
index e0aac65c6d82..82bf44ab811b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_fw_hdr.h
@@ -1,6 +1,6 @@
/* Broadcom NetXtreme-C/E network driver.
*
- * Copyright (c) 2014-2015 Broadcom Corporation
+ * Copyright (c) 2014-2016 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -70,6 +70,7 @@ enum SUPPORTED_CODE {
CODE_KONG_PATCH, /* 18 - KONG Patch firmware */
CODE_BONO_FW, /* 19 - BONO firmware */
CODE_BONO_PATCH, /* 20 - BONO Patch firmware */
+ CODE_CHIMP_PATCH, /* 21 - ChiMP Patch firmware */
MAX_CODE_TYPE,
};
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 4badbedcb421..517567f6d651 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -1,6 +1,6 @@
/* Broadcom NetXtreme-C/E network driver.
*
- * Copyright (c) 2014-2015 Broadcom Corporation
+ * Copyright (c) 2014-2016 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -104,6 +104,8 @@ struct hwrm_async_event_cmpl {
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE (0x3UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED (0x4UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED (0x5UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE (0x6UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_PHY_CFG_CHANGE (0x7UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD (0x10UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD (0x11UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD (0x20UL << 0)
@@ -111,6 +113,7 @@ struct hwrm_async_event_cmpl {
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR (0x30UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE (0x31UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE (0x32UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE (0x33UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR (0xffUL << 0)
__le32 event_data2;
u8 opaque_v;
@@ -141,6 +144,7 @@ struct hwrm_async_event_cmpl_link_status_change {
#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE 0x1UL
#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_DOWN (0x0UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP (0x1UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_LAST HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_LINK_CHANGE_UP
#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_MASK 0xeUL
#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_SFT 1
#define HWRM_ASYNC_EVENT_CMPL_LINK_STATUS_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0UL
@@ -195,6 +199,9 @@ struct hwrm_async_event_cmpl_link_speed_change {
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_25GB (0xfaUL << 1)
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_40GB (0x190UL << 1)
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_50GB (0x1f4UL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_100GB (0x3e8UL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10MB (0xffffUL << 1)
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_LAST HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_NEW_LINK_SPEED_100MBPS_10MB
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffff0000UL
#define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CHANGE_EVENT_DATA1_PORT_ID_SFT 16
};
@@ -237,6 +244,55 @@ struct hwrm_async_event_cmpl_port_conn_not_allowed {
__le32 event_data1;
#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
#define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_MASK 0xff0000UL
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_SFT 16
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_NONE (0x0UL << 16)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_DISABLETX (0x1UL << 16)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_WARNINGMSG (0x2UL << 16)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN (0x3UL << 16)
+ #define HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_LAST HWRM_ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_ENFORCEMENT_POLICY_PWRDOWN
+};
+
+/* HWRM Asynchronous Event Completion Record for link speed config not allowed (16 bytes) */
+struct hwrm_async_event_cmpl_link_speed_cfg_not_allowed {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED (0x5UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_NOT_ALLOWED_EVENT_DATA1_PORT_ID_SFT 0
+};
+
+/* HWRM Asynchronous Event Completion Record for link speed configuration change (16 bytes) */
+struct hwrm_async_event_cmpl_link_speed_cfg_change {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_ID_LINK_SPEED_CFG_CHANGE (0x6UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_MASK 0xffffUL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_PORT_ID_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_SUPPORTED_LINK_SPEEDS_CHANGE 0x10000UL
+ #define HWRM_ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL
};
/* HWRM Asynchronous Event Completion Record for Function Driver Unload (16 bytes) */
@@ -363,6 +419,47 @@ struct hwrm_async_event_cmpl_vf_mac_addr_change {
#define HWRM_ASYNC_EVENT_CMPL_VF_MAC_ADDR_CHANGE_EVENT_DATA1_VF_ID_SFT 0
};
+/* HWRM Asynchronous Event Completion Record for PF-VF communication status change (16 bytes) */
+struct hwrm_async_event_cmpl_pf_vf_comm_status_change {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_ID_PF_VF_COMM_STATUS_CHANGE (0x32UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_PF_VF_COMM_STATUS_CHANGE_EVENT_DATA1_COMM_ESTABLISHED 0x1UL
+};
+
+/* HWRM Asynchronous Event Completion Record for VF configuration change (16 bytes) */
+struct hwrm_async_event_cmpl_vf_cfg_change {
+ __le16 type;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_MASK 0x3fUL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_SFT 0
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_TYPE_HWRM_ASYNC_EVENT (0x2eUL << 0)
+ __le16 event_id;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_ID_VF_CFG_CHANGE (0x33UL << 0)
+ __le32 event_data2;
+ u8 opaque_v;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_V 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_MASK 0xfeUL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_OPAQUE_SFT 1
+ u8 timestamp_lo;
+ __le16 timestamp_hi;
+ __le32 event_data1;
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MTU_CHANGE 0x1UL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_MRU_CHANGE 0x2UL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE 0x4UL
+ #define HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_VLAN_CHANGE 0x8UL
+};
+
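/* Editor's sketch, not part of the patch: a PF handler can test the
 * event_data1 bits of the VF configuration change event added above to
 * see which attribute changed. The function name is illustrative.
 */
static inline bool sketch_vf_dflt_mac_changed(
	struct hwrm_async_event_cmpl_vf_cfg_change *cmpl)
{
	u32 data1 = le32_to_cpu(cmpl->event_data1);

	return !!(data1 &
		  HWRM_ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_DFLT_MAC_ADDR_CHANGE);
}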
/* HWRM Asynchronous Event Completion Record for HWRM Error (16 bytes) */
struct hwrm_async_event_cmpl_hwrm_error {
__le16 type;
@@ -377,6 +474,7 @@ struct hwrm_async_event_cmpl_hwrm_error {
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_WARNING (0x0UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_NONFATAL (0x1UL << 0)
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL (0x2UL << 0)
+ #define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_LAST HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA2_SEVERITY_FATAL
u8 opaque_v;
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_V 0x1UL
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_OPAQUE_MASK 0xfeUL
@@ -387,12 +485,12 @@ struct hwrm_async_event_cmpl_hwrm_error {
#define HWRM_ASYNC_EVENT_CMPL_HWRM_ERROR_EVENT_DATA1_TIMESTAMP 0x1UL
};
-/* HW Resource Manager Specification 1.0.0 */
+/* HW Resource Manager Specification 1.3.0 */
#define HWRM_VERSION_MAJOR 1
-#define HWRM_VERSION_MINOR 0
+#define HWRM_VERSION_MINOR 3
#define HWRM_VERSION_UPDATE 0
-#define HWRM_VERSION_STR "1.0.0"
+#define HWRM_VERSION_STR "1.3.0"
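/* Editor's note, not part of the patch: the spec-code checks later in this
 * series (0x10201 and 0x10202 in bnxt_sriov.c below) compare against a
 * value packed from the firmware-reported interface version, roughly:
 *
 *	bp->hwrm_spec_code = (maj << 16) | (min << 8) | upd;
 *
 * so firmware implementing this 1.3.0 spec reports 0x10300.
 */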
/*
* Following is the signature for HWRM message field that indicates not
* applicable (All F's). Need to cast it the size of the field if needed.
@@ -444,7 +542,7 @@ struct cmd_nums {
#define HWRM_FUNC_BUF_RGTR (0x1fUL)
#define HWRM_PORT_PHY_CFG (0x20UL)
#define HWRM_PORT_MAC_CFG (0x21UL)
- #define RESERVED2 (0x22UL)
+ #define HWRM_PORT_TS_QUERY (0x22UL)
#define HWRM_PORT_QSTATS (0x23UL)
#define HWRM_PORT_LPBK_QSTATS (0x24UL)
#define HWRM_PORT_CLR_STATS (0x25UL)
@@ -452,6 +550,9 @@ struct cmd_nums {
#define HWRM_PORT_PHY_QCFG (0x27UL)
#define HWRM_PORT_MAC_QCFG (0x28UL)
#define HWRM_PORT_BLINK_LED (0x29UL)
+ #define HWRM_PORT_PHY_QCAPS (0x2aUL)
+ #define HWRM_PORT_PHY_I2C_WRITE (0x2bUL)
+ #define HWRM_PORT_PHY_I2C_READ (0x2cUL)
#define HWRM_QUEUE_QPORTCFG (0x30UL)
#define HWRM_QUEUE_QCFG (0x31UL)
#define HWRM_QUEUE_CFG (0x32UL)
@@ -511,6 +612,9 @@ struct cmd_nums {
#define HWRM_FWD_RESP (0xd2UL)
#define HWRM_FWD_ASYNC_EVENT_CMPL (0xd3UL)
#define HWRM_TEMP_MONITOR_QUERY (0xe0UL)
+ #define HWRM_WOL_FILTER_ALLOC (0xf0UL)
+ #define HWRM_WOL_FILTER_FREE (0xf1UL)
+ #define HWRM_WOL_FILTER_QCFG (0xf2UL)
#define HWRM_DBG_READ_DIRECT (0xff10UL)
#define HWRM_DBG_READ_INDIRECT (0xff11UL)
#define HWRM_DBG_WRITE_DIRECT (0xff12UL)
@@ -531,6 +635,7 @@ struct cmd_nums {
__le16 unused_0[3];
};
+/* Return Codes (8 bytes) */
struct ret_codes {
__le16 error_code;
#define HWRM_ERR_CODE_SUCCESS (0x0UL)
@@ -875,10 +980,11 @@ struct hwrm_func_vf_cfg_input {
#define FUNC_VF_CFG_REQ_ENABLES_MTU 0x1UL
#define FUNC_VF_CFG_REQ_ENABLES_GUEST_VLAN 0x2UL
#define FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR 0x4UL
+ #define FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR 0x8UL
__le16 mtu;
__le16 guest_vlan;
__le16 async_event_cr;
- __le16 unused_0[3];
+ u8 dflt_mac_addr[6];
};
/* Output (16 bytes) */
@@ -917,7 +1023,12 @@ struct hwrm_func_qcaps_output {
__le32 flags;
#define FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED 0x1UL
#define FUNC_QCAPS_RESP_FLAGS_GLOBAL_MSIX_AUTOMASKING 0x2UL
- u8 perm_mac_address[6];
+ #define FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED 0x4UL
+ #define FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED 0x8UL
+ #define FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED 0x10UL
+ #define FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED 0x20UL
+ #define FUNC_QCAPS_RESP_FLAGS_WOL_BMP_SUPPORTED 0x40UL
+ u8 mac_address[6];
__le16 max_rsscos_ctx;
__le16 max_cmpl_rings;
__le16 max_tx_rings;
@@ -942,6 +1053,68 @@ struct hwrm_func_qcaps_output {
u8 valid;
};
+/* hwrm_func_qcfg */
+/* Input (24 bytes) */
+struct hwrm_func_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 fid;
+ __le16 unused_0[3];
+};
+
+/* Output (72 bytes) */
+struct hwrm_func_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le16 fid;
+ __le16 port_id;
+ __le16 vlan;
+ __le16 flags;
+ #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_MAGICPKT_ENABLED 0x1UL
+ #define FUNC_QCFG_RESP_FLAGS_OOB_WOL_BMP_ENABLED 0x2UL
+ u8 mac_address[6];
+ __le16 pci_id;
+ __le16 alloc_rsscos_ctx;
+ __le16 alloc_cmpl_rings;
+ __le16 alloc_tx_rings;
+ __le16 alloc_rx_rings;
+ __le16 alloc_l2_ctx;
+ __le16 alloc_vnics;
+ __le16 mtu;
+ __le16 mru;
+ __le16 stat_ctx_id;
+ u8 port_partition_type;
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_SPF (0x0UL << 0)
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_MPFS (0x1UL << 0)
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0 (0x2UL << 0)
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5 (0x3UL << 0)
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0 (0x4UL << 0)
+ #define FUNC_QCFG_RESP_PORT_PARTITION_TYPE_UNKNOWN (0xffUL << 0)
+ u8 unused_0;
+ __le16 dflt_vnic_id;
+ u8 unused_1;
+ u8 unused_2;
+ __le32 min_bw;
+ __le32 max_bw;
+ u8 evb_mode;
+ #define FUNC_QCFG_RESP_EVB_MODE_NO_EVB (0x0UL << 0)
+ #define FUNC_QCFG_RESP_EVB_MODE_VEB (0x1UL << 0)
+ #define FUNC_QCFG_RESP_EVB_MODE_VEPA (0x2UL << 0)
+ u8 unused_3;
+ __le16 unused_4;
+ __le32 alloc_mcast_filters;
+ __le32 alloc_hw_ring_grps;
+ u8 unused_5;
+ u8 unused_6;
+ u8 unused_7;
+ u8 valid;
+};
+
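/* Editor's sketch, not part of the patch: issuing the new FUNC_QCFG query
 * with the driver's existing helpers (used the same way in bnxt_sriov.c
 * below). HWRM_FUNC_QCFG comes from the full cmd_nums list, which this
 * hunk does not show; the 0xffff fid meaning "the calling function" is an
 * assumption here.
 */
static int sketch_hwrm_func_qcfg(struct bnxt *bp)
{
	struct hwrm_func_qcfg_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
	req.fid = cpu_to_le16(0xffff);	/* query the calling function */
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}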
/* hwrm_func_cfg */
/* Input (88 bytes) */
struct hwrm_func_cfg_input {
@@ -1171,6 +1344,7 @@ struct hwrm_func_drv_rgtr_input {
#define FUNC_DRV_RGTR_REQ_OS_TYPE_UNKNOWN (0x0UL << 0)
#define FUNC_DRV_RGTR_REQ_OS_TYPE_OTHER (0x1UL << 0)
#define FUNC_DRV_RGTR_REQ_OS_TYPE_MSDOS (0xeUL << 0)
+ #define FUNC_DRV_RGTR_REQ_OS_TYPE_WINDOWS (0x12UL << 0)
#define FUNC_DRV_RGTR_REQ_OS_TYPE_SOLARIS (0x1dUL << 0)
#define FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX (0x24UL << 0)
#define FUNC_DRV_RGTR_REQ_OS_TYPE_FREEBSD (0x2aUL << 0)
@@ -1245,8 +1419,8 @@ struct hwrm_func_buf_rgtr_input {
#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4K (0xcUL << 0)
#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_8K (0xdUL << 0)
#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_64K (0x10UL << 0)
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M (0x16UL << 0)
- #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M (0x17UL << 0)
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_2M (0x15UL << 0)
+ #define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_4M (0x16UL << 0)
#define FUNC_BUF_RGTR_REQ_REQ_BUF_PAGE_SIZE_1G (0x1eUL << 0)
__le16 req_buf_len;
__le16 resp_buf_len;
@@ -1302,6 +1476,7 @@ struct hwrm_func_drv_qver_output {
#define FUNC_DRV_QVER_RESP_OS_TYPE_UNKNOWN (0x0UL << 0)
#define FUNC_DRV_QVER_RESP_OS_TYPE_OTHER (0x1UL << 0)
#define FUNC_DRV_QVER_RESP_OS_TYPE_MSDOS (0xeUL << 0)
+ #define FUNC_DRV_QVER_RESP_OS_TYPE_WINDOWS (0x12UL << 0)
#define FUNC_DRV_QVER_RESP_OS_TYPE_SOLARIS (0x1dUL << 0)
#define FUNC_DRV_QVER_RESP_OS_TYPE_LINUX (0x24UL << 0)
#define FUNC_DRV_QVER_RESP_OS_TYPE_FREEBSD (0x2aUL << 0)
@@ -1317,7 +1492,7 @@ struct hwrm_func_drv_qver_output {
};
/* hwrm_port_phy_cfg */
-/* Input (48 bytes) */
+/* Input (56 bytes) */
struct hwrm_port_phy_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -1329,6 +1504,16 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DOWN 0x2UL
#define PORT_PHY_CFG_REQ_FLAGS_FORCE 0x4UL
#define PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG 0x8UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE 0x10UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE 0x20UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE 0x40UL
+ #define PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE 0x80UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE 0x100UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE 0x200UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_ENABLE 0x400UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE74_DISABLE 0x800UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_ENABLE 0x1000UL
+ #define PORT_PHY_CFG_REQ_FLAGS_FEC_CLAUSE91_DISABLE 0x2000UL
__le32 enables;
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE 0x1UL
#define PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX 0x2UL
@@ -1339,6 +1524,8 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_ENABLES_LPBK 0x40UL
#define PORT_PHY_CFG_REQ_ENABLES_PREEMPHASIS 0x80UL
#define PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE 0x100UL
+ #define PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK 0x200UL
+ #define PORT_PHY_CFG_REQ_ENABLES_TX_LPI_TIMER 0x400UL
__le16 port_id;
__le16 force_link_speed;
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB (0x1UL << 0)
@@ -1350,12 +1537,14 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB (0xfaUL << 0)
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB (0x190UL << 0)
#define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB (0x1f4UL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB (0x3e8UL << 0)
+ #define PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10MB (0xffffUL << 0)
u8 auto_mode;
#define PORT_PHY_CFG_REQ_AUTO_MODE_NONE (0x0UL << 0)
#define PORT_PHY_CFG_REQ_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
#define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_SPEED (0x2UL << 0)
#define PORT_PHY_CFG_REQ_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
- #define PORT_PHY_CFG_REQ_AUTO_MODE_MASK (0x4UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK (0x4UL << 0)
u8 auto_duplex;
#define PORT_PHY_CFG_REQ_AUTO_DUPLEX_HALF (0x0UL << 0)
#define PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL (0x1UL << 0)
@@ -1363,6 +1552,7 @@ struct hwrm_port_phy_cfg_input {
u8 auto_pause;
#define PORT_PHY_CFG_REQ_AUTO_PAUSE_TX 0x1UL
#define PORT_PHY_CFG_REQ_AUTO_PAUSE_RX 0x2UL
+ #define PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
u8 unused_0;
__le16 auto_link_speed;
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB (0x1UL << 0)
@@ -1374,6 +1564,8 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB (0xfaUL << 0)
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB (0x190UL << 0)
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB (0x1f4UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB (0x3e8UL << 0)
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10MB (0xffffUL << 0)
__le16 auto_link_speed_mask;
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100MB 0x2UL
@@ -1386,6 +1578,9 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_25GB 0x100UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_40GB 0x200UL
#define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_50GB 0x400UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_100GB 0x800UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
+ #define PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
u8 wirespeed;
#define PORT_PHY_CFG_REQ_WIRESPEED_OFF (0x0UL << 0)
#define PORT_PHY_CFG_REQ_WIRESPEED_ON (0x1UL << 0)
@@ -1398,7 +1593,20 @@ struct hwrm_port_phy_cfg_input {
#define PORT_PHY_CFG_REQ_FORCE_PAUSE_RX 0x2UL
u8 unused_1;
__le32 preemphasis;
- __le32 unused_2;
+ __le16 eee_link_speed_mask;
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+ #define PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB 0x40UL
+ u8 unused_2;
+ u8 unused_3;
+ __le32 tx_lpi_timer;
+ __le32 unused_4;
+ #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_MASK 0xffffffUL
+ #define PORT_PHY_CFG_REQ_TX_LPI_TIMER_SFT 0
};
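/* Editor's sketch, not part of the patch: advertising EEE at 1Gb and 10Gb
 * with the fields added above. Sending the request follows the same
 * helper pattern as elsewhere in the driver.
 */
static void sketch_phy_cfg_enable_eee(struct hwrm_port_phy_cfg_input *req)
{
	req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE);
	req->enables |=
		cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_EEE_LINK_SPEED_MASK);
	req->eee_link_speed_mask =
		cpu_to_le16(PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_1GB |
			    PORT_PHY_CFG_REQ_EEE_LINK_SPEED_MASK_10GB);
}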
/* Output (16 bytes) */
@@ -1426,7 +1634,7 @@ struct hwrm_port_phy_qcfg_input {
__le16 unused_0[3];
};
-/* Output (48 bytes) */
+/* Output (96 bytes) */
struct hwrm_port_phy_qcfg_output {
__le16 error_code;
__le16 req_type;
@@ -1447,6 +1655,8 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_SPEED_25GB (0xfaUL << 0)
#define PORT_PHY_QCFG_RESP_LINK_SPEED_40GB (0x190UL << 0)
#define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB (0x1f4UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB (0x3e8UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB (0xffffUL << 0)
u8 duplex;
#define PORT_PHY_QCFG_RESP_DUPLEX_HALF (0x0UL << 0)
#define PORT_PHY_QCFG_RESP_DUPLEX_FULL (0x1UL << 0)
@@ -1465,6 +1675,9 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_25GB 0x100UL
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_40GB 0x200UL
#define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_50GB 0x400UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100GB 0x800UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MBHD 0x1000UL
+ #define PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_10MB 0x2000UL
__le16 force_link_speed;
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100MB (0x1UL << 0)
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_1GB (0xaUL << 0)
@@ -1475,15 +1688,18 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_25GB (0xfaUL << 0)
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_40GB (0x190UL << 0)
#define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_50GB (0x1f4UL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_100GB (0x3e8UL << 0)
+ #define PORT_PHY_QCFG_RESP_FORCE_LINK_SPEED_10MB (0xffffUL << 0)
u8 auto_mode;
#define PORT_PHY_QCFG_RESP_AUTO_MODE_NONE (0x0UL << 0)
#define PORT_PHY_QCFG_RESP_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
#define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_SPEED (0x2UL << 0)
#define PORT_PHY_QCFG_RESP_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
- #define PORT_PHY_QCFG_RESP_AUTO_MODE_MASK (0x4UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_MODE_SPEED_MASK (0x4UL << 0)
u8 auto_pause;
#define PORT_PHY_QCFG_RESP_AUTO_PAUSE_TX 0x1UL
#define PORT_PHY_QCFG_RESP_AUTO_PAUSE_RX 0x2UL
+ #define PORT_PHY_QCFG_RESP_AUTO_PAUSE_AUTONEG_PAUSE 0x4UL
__le16 auto_link_speed;
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100MB (0x1UL << 0)
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_1GB (0xaUL << 0)
@@ -1494,6 +1710,8 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_25GB (0xfaUL << 0)
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_40GB (0x190UL << 0)
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_50GB (0x1f4UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_100GB (0x3e8UL << 0)
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_10MB (0xffffUL << 0)
__le16 auto_link_speed_mask;
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MBHD 0x1UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100MB 0x2UL
@@ -1506,6 +1724,9 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_25GB 0x100UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_40GB 0x200UL
#define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_50GB 0x400UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_100GB 0x800UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MBHD 0x1000UL
+ #define PORT_PHY_QCFG_RESP_AUTO_LINK_SPEED_MASK_10MB 0x2000UL
u8 wirespeed;
#define PORT_PHY_QCFG_RESP_WIRESPEED_OFF (0x0UL << 0)
#define PORT_PHY_QCFG_RESP_WIRESPEED_ON (0x1UL << 0)
@@ -1516,31 +1737,49 @@ struct hwrm_port_phy_qcfg_output {
u8 force_pause;
#define PORT_PHY_QCFG_RESP_FORCE_PAUSE_TX 0x1UL
#define PORT_PHY_QCFG_RESP_FORCE_PAUSE_RX 0x2UL
- u8 reserved1;
+ u8 module_status;
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NONE (0x0UL << 0)
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG (0x2UL << 0)
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN (0x3UL << 0)
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED (0x4UL << 0)
+ #define PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTAPPLICABLE (0xffUL << 0)
__le32 preemphasis;
u8 phy_maj;
u8 phy_min;
u8 phy_bld;
u8 phy_type;
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR4 (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_UNKNOWN (0x0UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR (0x1UL << 0)
#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4 (0x2UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR4 (0x3UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR4 (0x4UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR (0x3UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR (0x4UL << 0)
#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2 (0x5UL << 0)
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX4 (0x6UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX (0x6UL << 0)
#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR (0x7UL << 0)
#define PORT_PHY_QCFG_RESP_PHY_TYPE_BASET (0x8UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE (0x9UL << 0)
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_SGMIIEXTPHY (0xaUL << 0)
u8 media_type;
+ #define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN (0x0UL << 0)
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP (0x1UL << 0)
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC (0x2UL << 0)
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE (0x3UL << 0)
- u8 transceiver_type;
- #define PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_INTERNAL (0x1UL << 0)
- #define PORT_PHY_QCFG_RESP_TRANSCEIVER_TYPE_XCVR_EXTERNAL (0x2UL << 0)
- u8 phy_addr;
+ u8 xcvr_pkg_type;
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_INTERNAL (0x1UL << 0)
+ #define PORT_PHY_QCFG_RESP_XCVR_PKG_TYPE_XCVR_EXTERNAL (0x2UL << 0)
+ u8 eee_config_phy_addr;
#define PORT_PHY_QCFG_RESP_PHY_ADDR_MASK 0x1fUL
#define PORT_PHY_QCFG_RESP_PHY_ADDR_SFT 0
- u8 unused_2;
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED 0x20UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE 0x40UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI 0x80UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_MASK 0xe0UL
+ #define PORT_PHY_QCFG_RESP_EEE_CONFIG_SFT 5
+ u8 parallel_detect;
+ #define PORT_PHY_QCFG_RESP_PARALLEL_DETECT 0x1UL
+ #define PORT_PHY_QCFG_RESP_RESERVED_MASK 0xfeUL
+ #define PORT_PHY_QCFG_RESP_RESERVED_SFT 1
__le16 link_partner_adv_speeds;
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MBHD 0x1UL
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100MB 0x2UL
@@ -1553,23 +1792,65 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_25GB 0x100UL
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_40GB 0x200UL
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_50GB 0x400UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_100GB 0x800UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MBHD 0x1000UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_SPEEDS_10MB 0x2000UL
u8 link_partner_adv_auto_mode;
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_NONE (0x0UL << 0)
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS (0x1UL << 0)
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED (0x2UL << 0)
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW (0x3UL << 0)
- #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_MASK (0x4UL << 0)
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK (0x4UL << 0)
u8 link_partner_adv_pause;
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_TX 0x1UL
#define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_PAUSE_RX 0x2UL
- u8 unused_3;
+ __le16 adv_eee_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+ #define PORT_PHY_QCFG_RESP_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
+ __le16 link_partner_adv_eee_link_speed_mask;
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 0x1UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB 0x2UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 0x4UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB 0x8UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 0x10UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 0x20UL
+ #define PORT_PHY_QCFG_RESP_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB 0x40UL
+ __le32 xcvr_identifier_type_tx_lpi_timer;
+ #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK 0xffffffUL
+ #define PORT_PHY_QCFG_RESP_TX_LPI_TIMER_SFT 0
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_MASK 0xff000000UL
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFT 24
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_UNKNOWN (0x0UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_SFP (0x3UL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP (0xcUL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFPPLUS (0xdUL << 24)
+ #define PORT_PHY_QCFG_RESP_XCVR_IDENTIFIER_TYPE_QSFP28 (0x11UL << 24)
+ __le16 fec_cfg;
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_SUPPORTED 0x2UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_AUTONEG_ENABLED 0x4UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_SUPPORTED 0x8UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
+ #define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
+ u8 unused_1;
+ u8 unused_2;
+ char phy_vendor_name[16];
+ char phy_vendor_partnumber[16];
+ __le32 unused_3;
u8 unused_4;
u8 unused_5;
+ u8 unused_6;
u8 valid;
};
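/* Editor's sketch, not part of the patch: the renamed eee_config_phy_addr
 * byte above now packs the 5-bit PHY address with the EEE state flags;
 * splitting it looks like this.
 */
static void sketch_split_eee_config(struct hwrm_port_phy_qcfg_output *resp,
				    u8 *phy_addr, bool *eee_enabled)
{
	u8 v = resp->eee_config_phy_addr;

	*phy_addr = v & PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
	*eee_enabled = !!(v & PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED);
}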
/* hwrm_port_mac_cfg */
-/* Input (32 bytes) */
+/* Input (40 bytes) */
struct hwrm_port_mac_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -1581,6 +1862,12 @@ struct hwrm_port_mac_cfg_input {
#define PORT_MAC_CFG_REQ_FLAGS_COS_ASSIGNMENT_ENABLE 0x2UL
#define PORT_MAC_CFG_REQ_FLAGS_TUNNEL_PRI2COS_ENABLE 0x4UL
#define PORT_MAC_CFG_REQ_FLAGS_IP_DSCP2COS_ENABLE 0x8UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE 0x10UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE 0x20UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE 0x40UL
+ #define PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE 0x80UL
+ #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_ENABLE 0x100UL
+ #define PORT_MAC_CFG_REQ_FLAGS_OOB_WOL_DISABLE 0x200UL
__le32 enables;
#define PORT_MAC_CFG_REQ_ENABLES_IPG 0x1UL
#define PORT_MAC_CFG_REQ_ENABLES_LPBK 0x2UL
@@ -1588,6 +1875,8 @@ struct hwrm_port_mac_cfg_input {
#define PORT_MAC_CFG_REQ_ENABLES_LCOS_MAP_PRI 0x8UL
#define PORT_MAC_CFG_REQ_ENABLES_TUNNEL_PRI2COS_MAP_PRI 0x10UL
#define PORT_MAC_CFG_REQ_ENABLES_DSCP2COS_MAP_PRI 0x20UL
+ #define PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE 0x40UL
+ #define PORT_MAC_CFG_REQ_ENABLES_TX_TS_CAPTURE_PTP_MSG_TYPE 0x80UL
__le16 port_id;
u8 ipg;
u8 lpbk;
@@ -1598,6 +1887,9 @@ struct hwrm_port_mac_cfg_input {
u8 lcos_map_pri;
u8 tunnel_pri2cos_map_pri;
u8 dscp2pri_map_pri;
+ __le16 rx_ts_capture_ptp_msg_type;
+ __le16 tx_ts_capture_ptp_msg_type;
+ __le32 unused_0;
};
/* Output (16 bytes) */
@@ -1754,6 +2046,113 @@ struct hwrm_port_blink_led_output {
u8 valid;
};
+/* hwrm_port_phy_qcaps */
+/* Input (24 bytes) */
+struct hwrm_port_phy_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 unused_0[3];
+};
+
+/* Output (24 bytes) */
+struct hwrm_port_phy_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 eee_supported;
+ #define PORT_PHY_QCAPS_RESP_EEE_SUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_RSVD1_MASK 0xfeUL
+ #define PORT_PHY_QCAPS_RESP_RSVD1_SFT 1
+ u8 unused_0;
+ __le16 supported_speeds_force_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GBHD 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_1GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_2_5GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10GB 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_20GB 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_25GB 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_40GB 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_50GB 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100GB 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MBHD 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_10MB 0x2000UL
+ __le16 supported_speeds_auto_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MBHD 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100MB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GBHD 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_1GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2GB 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_2_5GB 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10GB 0x40UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_20GB 0x80UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_25GB 0x100UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_40GB 0x200UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_50GB 0x400UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_100GB 0x800UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MBHD 0x1000UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_AUTO_MODE_10MB 0x2000UL
+ __le16 supported_speeds_eee_mode;
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD1 0x1UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_100MB 0x2UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD2 0x4UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_1GB 0x8UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD3 0x10UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_RSVD4 0x20UL
+ #define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_EEE_MODE_10GB 0x40UL
+ __le32 tx_lpi_timer_low;
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK 0xffffffUL
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_SFT 0
+ #define PORT_PHY_QCAPS_RESP_RSVD2_MASK 0xff000000UL
+ #define PORT_PHY_QCAPS_RESP_RSVD2_SFT 24
+ __le32 valid_tx_lpi_timer_high;
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK 0xffffffUL
+ #define PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_SFT 0
+ #define PORT_PHY_QCAPS_RESP_VALID_MASK 0xff000000UL
+ #define PORT_PHY_QCAPS_RESP_VALID_SFT 24
+};
+
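/* Editor's sketch, not part of the patch: the final dword of the response
 * above packs the upper tx_lpi_timer bound with the standard "valid"
 * byte; both are recovered as below.
 */
static void sketch_split_qcaps_valid(struct hwrm_port_phy_qcaps_output *resp,
				     u32 *lpi_timer_high, u8 *valid)
{
	u32 v = le32_to_cpu(resp->valid_tx_lpi_timer_high);

	*lpi_timer_high = v & PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
	*valid = (v & PORT_PHY_QCAPS_RESP_VALID_MASK) >>
		 PORT_PHY_QCAPS_RESP_VALID_SFT;
}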
+/* hwrm_port_phy_i2c_read */
+/* Input (40 bytes) */
+struct hwrm_port_phy_i2c_read_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 flags;
+ __le32 enables;
+ #define PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET 0x1UL
+ __le16 port_id;
+ u8 i2c_slave_addr;
+ u8 unused_0;
+ __le16 page_number;
+ __le16 page_offset;
+ u8 data_length;
+ u8 unused_1[7];
+};
+
+/* Output (80 bytes) */
+struct hwrm_port_phy_i2c_read_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 data[16];
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
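/* Editor's sketch, not part of the patch: filling the new I2C read request
 * to fetch the start of a module EEPROM. The 0xa0 slave address is the
 * conventional SFF A0h EEPROM address, an assumption not taken from this
 * patch; data_length may be up to 64, the size of data[16] in the output.
 */
static void sketch_fill_i2c_read(struct hwrm_port_phy_i2c_read_input *req,
				 u16 port_id)
{
	req->port_id = cpu_to_le16(port_id);
	req->i2c_slave_addr = 0xa0;
	req->page_number = cpu_to_le16(0);
	req->data_length = 16;
}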
/* hwrm_queue_qportcfg */
/* Input (24 bytes) */
struct hwrm_queue_qportcfg_input {
@@ -1766,6 +2165,7 @@ struct hwrm_queue_qportcfg_input {
#define QUEUE_QPORTCFG_REQ_FLAGS_PATH 0x1UL
#define QUEUE_QPORTCFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
#define QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ #define QUEUE_QPORTCFG_REQ_FLAGS_PATH_LAST QUEUE_QPORTCFG_REQ_FLAGS_PATH_RX
__le16 port_id;
__le16 unused_0;
};
@@ -1838,6 +2238,7 @@ struct hwrm_queue_cfg_input {
#define QUEUE_CFG_REQ_FLAGS_PATH 0x1UL
#define QUEUE_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
#define QUEUE_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ #define QUEUE_CFG_REQ_FLAGS_PATH_LAST QUEUE_CFG_REQ_FLAGS_PATH_RX
__le32 enables;
#define QUEUE_CFG_REQ_ENABLES_DFLT_LEN 0x1UL
#define QUEUE_CFG_REQ_ENABLES_SERVICE_PROFILE 0x2UL
@@ -1875,6 +2276,7 @@ struct hwrm_queue_buffers_cfg_input {
#define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH 0x1UL
#define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
#define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ #define QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_LAST QUEUE_BUFFERS_CFG_REQ_FLAGS_PATH_RX
__le32 enables;
#define QUEUE_BUFFERS_CFG_REQ_ENABLES_RESERVED 0x1UL
#define QUEUE_BUFFERS_CFG_REQ_ENABLES_SHARED 0x2UL
@@ -1952,6 +2354,7 @@ struct hwrm_queue_pri2cos_cfg_input {
#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH 0x1UL
#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
#define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ #define QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_LAST QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_RX
#define QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN 0x2UL
__le32 enables;
u8 port_id;
@@ -2006,7 +2409,7 @@ struct hwrm_queue_cos2bw_cfg_input {
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_SP (0x0UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_ETS (0x1UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID0_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
u8 queue_id0_pri_lvl;
u8 queue_id0_bw_weight;
u8 queue_id1;
@@ -2016,7 +2419,7 @@ struct hwrm_queue_cos2bw_cfg_input {
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_SP (0x0UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_ETS (0x1UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID1_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
u8 queue_id1_pri_lvl;
u8 queue_id1_bw_weight;
u8 queue_id2;
@@ -2026,7 +2429,7 @@ struct hwrm_queue_cos2bw_cfg_input {
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_SP (0x0UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_ETS (0x1UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID2_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
u8 queue_id2_pri_lvl;
u8 queue_id2_bw_weight;
u8 queue_id3;
@@ -2036,7 +2439,7 @@ struct hwrm_queue_cos2bw_cfg_input {
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_SP (0x0UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_ETS (0x1UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID3_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
u8 queue_id3_pri_lvl;
u8 queue_id3_bw_weight;
u8 queue_id4;
@@ -2046,7 +2449,7 @@ struct hwrm_queue_cos2bw_cfg_input {
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_SP (0x0UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_ETS (0x1UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID4_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
u8 queue_id4_pri_lvl;
u8 queue_id4_bw_weight;
u8 queue_id5;
@@ -2056,7 +2459,7 @@ struct hwrm_queue_cos2bw_cfg_input {
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_SP (0x0UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_ETS (0x1UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID5_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
u8 queue_id5_pri_lvl;
u8 queue_id5_bw_weight;
u8 queue_id6;
@@ -2066,7 +2469,7 @@ struct hwrm_queue_cos2bw_cfg_input {
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_SP (0x0UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_ETS (0x1UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID6_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
u8 queue_id6_pri_lvl;
u8 queue_id6_bw_weight;
u8 queue_id7;
@@ -2076,7 +2479,7 @@ struct hwrm_queue_cos2bw_cfg_input {
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_SP (0x0UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_ETS (0x1UL << 0)
#define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_FIRST (0x2UL << 0)
- #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST (0xffffUL << 0)
+ #define QUEUE_COS2BW_CFG_REQ_QUEUE_ID7_TSA_ASSIGN_RESERVED_LAST (0xffUL << 0)
u8 queue_id7_pri_lvl;
u8 queue_id7_bw_weight;
u8 unused_1[5];
@@ -2158,6 +2561,8 @@ struct hwrm_vnic_cfg_input {
#define VNIC_CFG_REQ_FLAGS_DEFAULT 0x1UL
#define VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE 0x2UL
#define VNIC_CFG_REQ_FLAGS_BD_STALL_MODE 0x4UL
+ #define VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE 0x8UL
+ #define VNIC_CFG_REQ_FLAGS_ROCE_ONLY_VNIC_MODE 0x10UL
__le32 enables;
#define VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP 0x1UL
#define VNIC_CFG_REQ_ENABLES_RSS_RULE 0x2UL
@@ -2622,6 +3027,7 @@ struct hwrm_cfa_l2_filter_alloc_input {
#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH 0x1UL
#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_TX (0x0UL << 0)
#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ #define CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX
#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_LOOPBACK 0x2UL
#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_DROP 0x4UL
#define CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST 0x8UL
@@ -2747,6 +3153,7 @@ struct hwrm_cfa_l2_filter_cfg_input {
#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH 0x1UL
#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_TX (0x0UL << 0)
#define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX (0x1UL << 0)
+ #define CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_LAST CFA_L2_FILTER_CFG_REQ_FLAGS_PATH_RX
#define CFA_L2_FILTER_CFG_REQ_FLAGS_DROP 0x2UL
__le32 enables;
#define CFA_L2_FILTER_CFG_REQ_ENABLES_DST_ID 0x1UL
@@ -2770,7 +3177,7 @@ struct hwrm_cfa_l2_filter_cfg_output {
};
/* hwrm_cfa_l2_set_rx_mask */
-/* Input (40 bytes) */
+/* Input (56 bytes) */
struct hwrm_cfa_l2_set_rx_mask_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -2785,9 +3192,15 @@ struct hwrm_cfa_l2_set_rx_mask_input {
#define CFA_L2_SET_RX_MASK_REQ_MASK_BCAST 0x8UL
#define CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS 0x10UL
#define CFA_L2_SET_RX_MASK_REQ_MASK_OUTERMOST 0x20UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_VLANONLY 0x40UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_VLAN_NONVLAN 0x80UL
+ #define CFA_L2_SET_RX_MASK_REQ_MASK_ANYVLAN_NONVLAN 0x100UL
__le64 mc_tbl_addr;
__le32 num_mc_entries;
__le32 unused_0;
+ __le64 vlan_tag_tbl_addr;
+ __le32 num_vlan_tags;
+ __le32 unused_1;
};
/* Output (16 bytes) */
@@ -3337,6 +3750,41 @@ struct hwrm_fw_reset_output {
u8 valid;
};
+/* hwrm_fw_qstatus */
+/* Input (24 bytes) */
+struct hwrm_fw_qstatus_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 embedded_proc_type;
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT (0x0UL << 0)
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT (0x1UL << 0)
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL (0x2UL << 0)
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE (0x3UL << 0)
+ #define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_RSVD (0x4UL << 0)
+ u8 unused_0[7];
+};
+
+/* Output (16 bytes) */
+struct hwrm_fw_qstatus_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 selfrst_status;
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTNONE (0x0UL << 0)
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTASAP (0x1UL << 0)
+ #define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST (0x2UL << 0)
+ u8 unused_0;
+ __le16 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 unused_4;
+ u8 valid;
+};
+
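/* Editor's sketch, not part of the patch: querying the boot processor's
 * self-reset status with the new message. HWRM_FW_QSTATUS and the
 * hwrm_cmd_resp_addr response buffer are assumed from the driver context;
 * response locking is elided for brevity.
 */
static int sketch_fw_qstatus(struct bnxt *bp, u8 *selfrst_status)
{
	struct hwrm_fw_qstatus_input req = {0};
	struct hwrm_fw_qstatus_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_QSTATUS, -1, -1);
	req.embedded_proc_type = FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_BOOT;
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*selfrst_status = resp->selfrst_status;
	return rc;
}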
/* hwrm_exec_fwd_resp */
/* Input (128 bytes) */
struct hwrm_exec_fwd_resp_input {
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
index 43ef392c8588..73f2249555b5 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_nvm_defs.h
@@ -1,6 +1,6 @@
/* Broadcom NetXtreme-C/E network driver.
*
- * Copyright (c) 2014-2015 Broadcom Corporation
+ * Copyright (c) 2014-2016 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -13,6 +13,7 @@
enum bnxt_nvm_directory_type {
BNX_DIR_TYPE_UNUSED = 0,
BNX_DIR_TYPE_PKG_LOG = 1,
+ BNX_DIR_TYPE_UPDATE = 2,
BNX_DIR_TYPE_CHIMP_PATCH = 3,
BNX_DIR_TYPE_BOOTCODE = 4,
BNX_DIR_TYPE_VPD = 5,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 0c5f510492f1..50d2007a2640 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -1,6 +1,6 @@
/* Broadcom NetXtreme-C/E network driver.
*
- * Copyright (c) 2014-2015 Broadcom Corporation
+ * Copyright (c) 2014-2016 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -143,6 +143,9 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos)
u16 vlan_tag;
int rc;
+ if (bp->hwrm_spec_code < 0x10201)
+ return -ENOTSUPP;
+
rc = bnxt_vf_ndo_prep(bp, vf_id);
if (rc)
return rc;
@@ -771,12 +774,8 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
PORT_PHY_QCFG_RESP_LINK_NO_LINK) {
phy_qcfg_resp.link =
PORT_PHY_QCFG_RESP_LINK_LINK;
- if (phy_qcfg_resp.auto_link_speed)
- phy_qcfg_resp.link_speed =
- phy_qcfg_resp.auto_link_speed;
- else
- phy_qcfg_resp.link_speed =
- phy_qcfg_resp.force_link_speed;
+ phy_qcfg_resp.link_speed = cpu_to_le16(
+ PORT_PHY_QCFG_RESP_LINK_SPEED_10GB);
phy_qcfg_resp.duplex =
PORT_PHY_QCFG_RESP_DUPLEX_FULL;
phy_qcfg_resp.pause =
@@ -859,8 +858,8 @@ void bnxt_update_vf_mac(struct bnxt *bp)
* default but the stored zero MAC will allow the VF user to change
 * the random MAC address using ndo_set_mac_address() if desired.
*/
- if (!ether_addr_equal(resp->perm_mac_address, bp->vf.mac_addr))
- memcpy(bp->vf.mac_addr, resp->perm_mac_address, ETH_ALEN);
+ if (!ether_addr_equal(resp->mac_address, bp->vf.mac_addr))
+ memcpy(bp->vf.mac_addr, resp->mac_address, ETH_ALEN);
/* overwrite netdev dev_addr with admin VF MAC */
if (is_valid_ether_addr(bp->vf.mac_addr))
@@ -869,6 +868,31 @@ update_vf_mac_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
}
+int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
+{
+ struct hwrm_func_vf_cfg_input req = {0};
+ int rc = 0;
+
+ if (!BNXT_VF(bp))
+ return 0;
+
+ if (bp->hwrm_spec_code < 0x10202) {
+ if (is_valid_ether_addr(bp->vf.mac_addr))
+ rc = -EADDRNOTAVAIL;
+ goto mac_done;
+ }
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
+ req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
+ memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+mac_done:
+ if (rc) {
+ rc = -EADDRNOTAVAIL;
+ netdev_warn(bp->dev, "VF MAC address %pM not approved by the PF\n",
+ mac);
+ }
+ return rc;
+}
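/* Editor's note, not part of the patch: bnxt_approve_mac() above is meant
 * to be called from the VF's MAC-setting path, e.g. (the surrounding
 * callback is illustrative):
 *
 *	rc = bnxt_approve_mac(bp, addr->sa_data);
 *	if (rc)
 *		return rc;
 *	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
 */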
#else
void bnxt_sriov_disable(struct bnxt *bp)
@@ -883,4 +907,9 @@ void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
void bnxt_update_vf_mac(struct bnxt *bp)
{
}
+
+int bnxt_approve_mac(struct bnxt *bp, u8 *mac)
+{
+ return 0;
+}
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
index c151280e3980..0392670ab49c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -1,6 +1,6 @@
/* Broadcom NetXtreme-C/E network driver.
*
- * Copyright (c) 2014-2015 Broadcom Corporation
+ * Copyright (c) 2014-2016 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -20,4 +20,5 @@ int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs);
void bnxt_sriov_disable(struct bnxt *);
void bnxt_hwrm_exec_fwd_req(struct bnxt *);
void bnxt_update_vf_mac(struct bnxt *);
+int bnxt_approve_mac(struct bnxt *, u8 *);
#endif
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index b69dc58faeab..b1d2ac818710 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -5350,7 +5350,10 @@ static int cnic_start_hw(struct cnic_dev *dev)
return 0;
err1:
- cp->free_resc(dev);
+ if (ethdev->drv_state & CNIC_DRV_STATE_HANDLES_IRQ)
+ cp->stop_hw(dev);
+ else
+ cp->free_resc(dev);
pci_dev_put(dev->pcidev);
return err;
}
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 44ad1490b472..541456398dfb 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -104,8 +104,8 @@ static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
static inline void dmadesc_set(struct bcmgenet_priv *priv,
void __iomem *d, dma_addr_t addr, u32 val)
{
- dmadesc_set_length_status(priv, d, val);
dmadesc_set_addr(priv, d, addr);
+ dmadesc_set_length_status(priv, d, val);
}
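/* Editor's note, not part of the patch: writing the buffer address before
 * the length/status word likely matters because the length/status write
 * is what marks the descriptor ready for hardware; this rationale is an
 * editorial inference, not taken from the commit text.
 */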
static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
@@ -1225,8 +1225,10 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
dev->stats.tx_packets += pkts_compl;
dev->stats.tx_bytes += bytes_compl;
+ txq = netdev_get_tx_queue(dev, ring->queue);
+ netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
+
if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
- txq = netdev_get_tx_queue(dev, ring->queue);
if (netif_tx_queue_stopped(txq))
netif_tx_wake_queue(txq);
}
@@ -1335,6 +1337,7 @@ static int bcmgenet_xmit_frag(struct net_device *dev,
struct bcmgenet_priv *priv = netdev_priv(dev);
struct device *kdev = &priv->pdev->dev;
struct enet_cb *tx_cb_ptr;
+ unsigned int frag_size;
dma_addr_t mapping;
int ret;
@@ -1342,10 +1345,12 @@ static int bcmgenet_xmit_frag(struct net_device *dev,
if (unlikely(!tx_cb_ptr))
BUG();
+
tx_cb_ptr->skb = NULL;
- mapping = skb_frag_dma_map(kdev, frag, 0,
- skb_frag_size(frag), DMA_TO_DEVICE);
+ frag_size = skb_frag_size(frag);
+
+ mapping = skb_frag_dma_map(kdev, frag, 0, frag_size, DMA_TO_DEVICE);
ret = dma_mapping_error(kdev, mapping);
if (ret) {
priv->mib.tx_dma_failed++;
@@ -1355,10 +1360,10 @@ static int bcmgenet_xmit_frag(struct net_device *dev,
}
dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
- dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size);
+ dma_unmap_len_set(tx_cb_ptr, dma_len, frag_size);
dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
- (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
+ (frag_size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
(priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));
return 0;
@@ -1451,15 +1456,19 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
else
index -= 1;
- nr_frags = skb_shinfo(skb)->nr_frags;
ring = &priv->tx_rings[index];
txq = netdev_get_tx_queue(dev, ring->queue);
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
spin_lock_irqsave(&ring->lock, flags);
- if (ring->free_bds <= nr_frags + 1) {
- netif_tx_stop_queue(txq);
- netdev_err(dev, "%s: tx ring %d full when queue %d awake\n",
- __func__, index, ring->queue);
+ if (ring->free_bds <= (nr_frags + 1)) {
+ if (!netif_tx_queue_stopped(txq)) {
+ netif_tx_stop_queue(txq);
+ netdev_err(dev,
+ "%s: tx ring %d full when queue %d awake\n",
+ __func__, index, ring->queue);
+ }
ret = NETDEV_TX_BUSY;
goto out;
}
@@ -1513,6 +1522,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
ring->prod_index += nr_frags + 1;
ring->prod_index &= DMA_P_INDEX_MASK;
+ netdev_tx_sent_queue(txq, GENET_CB(skb)->bytes_sent);
+
if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
netif_tx_stop_queue(txq);
@@ -1732,7 +1743,7 @@ static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
work_done = bcmgenet_desc_rx(ring, budget);
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
ring->int_enable(ring);
}
@@ -2361,6 +2372,7 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
{
int i;
+ struct netdev_queue *txq;
bcmgenet_fini_rx_napi(priv);
bcmgenet_fini_tx_napi(priv);
@@ -2375,6 +2387,14 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
}
}
+ for (i = 0; i < priv->hw_params->tx_queues; i++) {
+ txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue);
+ netdev_tx_reset_queue(txq);
+ }
+
+ txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue);
+ netdev_tx_reset_queue(txq);
+
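/* Editor's note, not part of the patch: together with the
 * netdev_tx_sent_queue()/netdev_tx_completed_queue() calls added in the
 * xmit and reclaim paths above, these netdev_tx_reset_queue() calls keep
 * the Byte Queue Limits accounting consistent across a ring teardown.
 */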
bcmgenet_free_rx_buffers(priv);
kfree(priv->rx_cbs);
kfree(priv->tx_cbs);
@@ -2490,7 +2510,7 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
if (likely(napi_schedule_prep(&rx_ring->napi))) {
rx_ring->int_disable(rx_ring);
- __napi_schedule(&rx_ring->napi);
+ __napi_schedule_irqoff(&rx_ring->napi);
}
}
@@ -2503,7 +2523,7 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
if (likely(napi_schedule_prep(&tx_ring->napi))) {
tx_ring->int_disable(tx_ring);
- __napi_schedule(&tx_ring->napi);
+ __napi_schedule_irqoff(&tx_ring->napi);
}
}
@@ -2533,7 +2553,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
if (likely(napi_schedule_prep(&rx_ring->napi))) {
rx_ring->int_disable(rx_ring);
- __napi_schedule(&rx_ring->napi);
+ __napi_schedule_irqoff(&rx_ring->napi);
}
}
@@ -2542,7 +2562,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
if (likely(napi_schedule_prep(&tx_ring->napi))) {
tx_ring->int_disable(tx_ring);
- __napi_schedule(&tx_ring->napi);
+ __napi_schedule_irqoff(&tx_ring->napi);
}
}
@@ -3039,7 +3059,7 @@ static void bcmgenet_timeout(struct net_device *dev)
bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
dev->stats.tx_errors++;
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index eacc559679bf..f1b81187a201 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -2462,7 +2462,7 @@ static void sbmac_tx_timeout (struct net_device *dev)
spin_lock_irqsave(&sc->sbm_lock, flags);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
dev->stats.tx_errors++;
spin_unlock_irqrestore(&sc->sbm_lock, flags);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 3010080cfeee..ea967df4b202 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7383,7 +7383,7 @@ static void tg3_napi_fini(struct tg3 *tp)
static inline void tg3_netif_stop(struct tg3 *tp)
{
- tp->dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(tp->dev); /* prevent tx timeout */
tg3_napi_disable(tp);
netif_carrier_off(tp->dev);
netif_tx_disable(tp->dev);
@@ -12552,10 +12552,6 @@ static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
info->data = TG3_RSS_MAX_NUM_QS;
}
- /* The first interrupt vector only
- * handles link interrupts.
- */
- info->data -= 1;
return 0;
default:
@@ -14014,7 +14010,9 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
}
if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
+ (!ec->rx_coalesce_usecs) ||
(ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
+ (!ec->tx_coalesce_usecs) ||
(ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
(ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
(ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
@@ -14025,16 +14023,6 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
(ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
return -EINVAL;
- /* No rx interrupts will be generated if both are zero */
- if ((ec->rx_coalesce_usecs == 0) &&
- (ec->rx_max_coalesced_frames == 0))
- return -EINVAL;
-
- /* No tx interrupts will be generated if both are zero */
- if ((ec->tx_coalesce_usecs == 0) &&
- (ec->tx_max_coalesced_frames == 0))
- return -EINVAL;
-
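	/* Editor's note, not part of the patch: the two removed "both zero"
	 * checks became redundant once the combined test above started
	 * rejecting a zero rx_coalesce_usecs or tx_coalesce_usecs outright.
	 */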
/* Only copy relevant parameters, ignore all others. */
tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
@@ -18134,14 +18122,14 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
rtnl_lock();
- /* We needn't recover from permanent error */
- if (state == pci_channel_io_frozen)
- tp->pcierr_recovery = true;
-
/* We probably don't have netdev yet */
if (!netdev || !netif_running(netdev))
goto done;
+ /* We needn't recover from permanent error */
+ if (state == pci_channel_io_frozen)
+ tp->pcierr_recovery = true;
+
tg3_phy_stop(tp);
tg3_netif_stop(tp);
@@ -18238,7 +18226,7 @@ static void tg3_io_resume(struct pci_dev *pdev)
rtnl_lock();
- if (!netif_running(netdev))
+ if (!netdev || !netif_running(netdev))
goto done;
tg3_full_lock(tp, 0);
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
index 8fc246ea1fb8..05c1c1dd7751 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -312,7 +312,8 @@ bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
struct bnad_debug_info *regrd_debug = file->private_data;
struct bnad *bnad = (struct bnad *)regrd_debug->i_private;
struct bfa_ioc *ioc = &bnad->bna.ioceth.ioc;
- int addr, len, rc, i;
+ int rc, i;
+ u32 addr, len;
u32 *regbuf;
void __iomem *rb, *reg_addr;
unsigned long flags;
@@ -372,7 +373,8 @@ bnad_debugfs_write_regwr(struct file *file, const char __user *buf,
struct bnad_debug_info *debug = file->private_data;
struct bnad *bnad = (struct bnad *)debug->i_private;
struct bfa_ioc *ioc = &bnad->bna.ioceth.ioc;
- int addr, val, rc;
+ int rc;
+ u32 addr, val;
void __iomem *reg_addr;
unsigned long flags;
void *kern_buf;
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 0e4fdc3dd729..31f61a744d66 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -31,15 +31,10 @@
#define BNAD_NUM_TXF_COUNTERS 12
#define BNAD_NUM_RXF_COUNTERS 10
#define BNAD_NUM_CQ_COUNTERS (3 + 5)
-#define BNAD_NUM_RXQ_COUNTERS 6
+#define BNAD_NUM_RXQ_COUNTERS 7
#define BNAD_NUM_TXQ_COUNTERS 5
-#define BNAD_ETHTOOL_STATS_NUM \
- (sizeof(struct rtnl_link_stats64) / sizeof(u64) + \
- sizeof(struct bnad_drv_stats) / sizeof(u64) + \
- offsetof(struct bfi_enet_stats, rxf_stats[0]) / sizeof(u64))
-
-static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
+static const char *bnad_net_stats_strings[] = {
"rx_packets",
"tx_packets",
"rx_bytes",
@@ -50,22 +45,10 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
"tx_dropped",
"multicast",
"collisions",
-
"rx_length_errors",
- "rx_over_errors",
"rx_crc_errors",
"rx_frame_errors",
- "rx_fifo_errors",
- "rx_missed_errors",
-
- "tx_aborted_errors",
- "tx_carrier_errors",
"tx_fifo_errors",
- "tx_heartbeat_errors",
- "tx_window_errors",
-
- "rx_compressed",
- "tx_compressed",
"netif_queue_stop",
"netif_queue_wakeup",
@@ -254,6 +237,8 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
"fc_tx_fid_parity_errors",
};
+#define BNAD_ETHTOOL_STATS_NUM ARRAY_SIZE(bnad_net_stats_strings)
+
static int
bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
@@ -658,6 +643,8 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
string += ETH_GSTRING_LEN;
sprintf(string, "rxq%d_allocbuf_failed", q_num);
string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_mapbuf_failed", q_num);
+ string += ETH_GSTRING_LEN;
sprintf(string, "rxq%d_producer_index", q_num);
string += ETH_GSTRING_LEN;
sprintf(string, "rxq%d_consumer_index", q_num);
@@ -678,6 +665,9 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
sprintf(string, "rxq%d_allocbuf_failed",
q_num);
string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_mapbuf_failed",
+ q_num);
+ string += ETH_GSTRING_LEN;
sprintf(string, "rxq%d_producer_index",
q_num);
string += ETH_GSTRING_LEN;
@@ -854,9 +844,9 @@ bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
u64 *buf)
{
struct bnad *bnad = netdev_priv(netdev);
- int i, j, bi;
+ int i, j, bi = 0;
unsigned long flags;
- struct rtnl_link_stats64 *net_stats64;
+ struct rtnl_link_stats64 net_stats64;
u64 *stats64;
u32 bmap;
@@ -871,14 +861,25 @@ bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
* under the same lock
*/
spin_lock_irqsave(&bnad->bna_lock, flags);
- bi = 0;
- memset(buf, 0, stats->n_stats * sizeof(u64));
-
- net_stats64 = (struct rtnl_link_stats64 *)buf;
- bnad_netdev_qstats_fill(bnad, net_stats64);
- bnad_netdev_hwstats_fill(bnad, net_stats64);
- bi = sizeof(*net_stats64) / sizeof(u64);
+ memset(&net_stats64, 0, sizeof(net_stats64));
+ bnad_netdev_qstats_fill(bnad, &net_stats64);
+ bnad_netdev_hwstats_fill(bnad, &net_stats64);
+
+ buf[bi++] = net_stats64.rx_packets;
+ buf[bi++] = net_stats64.tx_packets;
+ buf[bi++] = net_stats64.rx_bytes;
+ buf[bi++] = net_stats64.tx_bytes;
+ buf[bi++] = net_stats64.rx_errors;
+ buf[bi++] = net_stats64.tx_errors;
+ buf[bi++] = net_stats64.rx_dropped;
+ buf[bi++] = net_stats64.tx_dropped;
+ buf[bi++] = net_stats64.multicast;
+ buf[bi++] = net_stats64.collisions;
+ buf[bi++] = net_stats64.rx_length_errors;
+ buf[bi++] = net_stats64.rx_crc_errors;
+ buf[bi++] = net_stats64.rx_frame_errors;
+ buf[bi++] = net_stats64.tx_fifo_errors;
/* Get netif_queue_stopped from stack */
bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev);
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index a63551d0a18a..d954a97b0b0b 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -61,8 +61,7 @@
#define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0)
#define MACB_WOL_ENABLED (0x1 << 1)
-/*
- * Graceful stop timeouts in us. We should allow up to
+/* Graceful stop timeouts in us. We should allow up to
* 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
*/
#define MACB_HALT_TIMEOUT 1230
@@ -130,9 +129,8 @@ static void hw_writel(struct macb *bp, int offset, u32 value)
writel_relaxed(value, bp->regs + offset);
}
-/*
- * Find the CPU endianness by using the loopback bit of NCR register. When the
- * CPU is in big endian we need to program swaped mode for management
+/* Find the CPU endianness by using the loopback bit of NCR register. When the
+ * CPU is in big endian we need to program swapped mode for management
* descriptor access.
*/
static bool hw_is_native_io(void __iomem *addr)
@@ -189,7 +187,7 @@ static void macb_get_hwaddr(struct macb *bp)
pdata = dev_get_platdata(&bp->pdev->dev);
- /* Check all 4 address register for vaild address */
+	/* Check all 4 address registers for a valid address */
for (i = 0; i < 4; i++) {
bottom = macb_or_gem_readl(bp, SA1B + i * 8);
top = macb_or_gem_readl(bp, SA1T + i * 8);
@@ -297,7 +295,7 @@ static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
ferr = DIV_ROUND_UP(ferr, rate / 100000);
if (ferr > 5)
netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
- rate);
+ rate);
if (clk_set_rate(clk, rate_rounded))
netdev_err(dev, "adjusting tx_clk failed.\n");
@@ -306,7 +304,7 @@ static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
static void macb_handle_link_change(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
- struct phy_device *phydev = bp->phy_dev;
+ struct phy_device *phydev = dev->phydev;
unsigned long flags;
int status_change = 0;
@@ -386,7 +384,8 @@ static int macb_mii_probe(struct net_device *dev)
pdata = dev_get_platdata(&bp->pdev->dev);
if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
- ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, "phy int");
+ ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin,
+ "phy int");
if (!ret) {
phy_irq = gpio_to_irq(pdata->phy_irq_pin);
phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
@@ -415,7 +414,6 @@ static int macb_mii_probe(struct net_device *dev)
bp->link = 0;
bp->speed = 0;
bp->duplex = -1;
- bp->phy_dev = phydev;
return 0;
}
@@ -430,7 +428,7 @@ static int macb_mii_init(struct macb *bp)
macb_writel(bp, NCR, MACB_BIT(MPE));
bp->mii_bus = mdiobus_alloc();
- if (bp->mii_bus == NULL) {
+ if (!bp->mii_bus) {
err = -ENOMEM;
goto err_out;
}
@@ -439,7 +437,7 @@ static int macb_mii_init(struct macb *bp)
bp->mii_bus->read = &macb_mdio_read;
bp->mii_bus->write = &macb_mdio_write;
snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
- bp->pdev->name, bp->pdev->id);
+ bp->pdev->name, bp->pdev->id);
bp->mii_bus->priv = bp;
bp->mii_bus->parent = &bp->pdev->dev;
pdata = dev_get_platdata(&bp->pdev->dev);
@@ -452,7 +450,8 @@ static int macb_mii_init(struct macb *bp)
err = of_mdiobus_register(bp->mii_bus, np);
/* fallback to standard phy registration if no phy were
- found during dt phy registration */
+ * found during dt phy registration
+ */
if (!err && !phy_find_first(bp->mii_bus)) {
for (i = 0; i < PHY_MAX_ADDR; i++) {
struct phy_device *phydev;
@@ -500,7 +499,7 @@ static void macb_update_stats(struct macb *bp)
WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
- for(; p < end; p++, offset += 4)
+ for (; p < end; p++, offset += 4)
*p += bp->macb_reg_readl(bp, offset);
}
@@ -568,8 +567,7 @@ static void macb_tx_error_task(struct work_struct *work)
/* Make sure nobody is trying to queue up new packets */
netif_tx_stop_all_queues(bp->dev);
- /*
- * Stop transmission now
+ /* Stop transmission now
* (in case we have just queued new packets)
* macb/gem must be halted to write TBQP register
*/
@@ -577,8 +575,7 @@ static void macb_tx_error_task(struct work_struct *work)
/* Just complain for now, reinitializing TX path can be good */
netdev_err(bp->dev, "BUG: halt tx timed out\n");
- /*
- * Treat frames in TX queue including the ones that caused the error.
+ /* Treat frames in TX queue including the ones that caused the error.
* Free transmit buffers in upper layer.
*/
for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
@@ -608,10 +605,9 @@ static void macb_tx_error_task(struct work_struct *work)
bp->stats.tx_bytes += skb->len;
}
} else {
- /*
- * "Buffers exhausted mid-frame" errors may only happen
- * if the driver is buggy, so complain loudly about those.
- * Statistics are updated by hardware.
+ /* "Buffers exhausted mid-frame" errors may only happen
+ * if the driver is buggy, so complain loudly about
+ * those. Statistics are updated by hardware.
*/
if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
netdev_err(bp->dev,
@@ -663,7 +659,7 @@ static void macb_tx_interrupt(struct macb_queue *queue)
queue_writel(queue, ISR, MACB_BIT(TCOMP));
netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
- (unsigned long)status);
+ (unsigned long)status);
head = queue->tx_head;
for (tail = queue->tx_tail; tail != head; tail++) {
@@ -723,7 +719,8 @@ static void gem_rx_refill(struct macb *bp)
struct sk_buff *skb;
dma_addr_t paddr;
- while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) {
+ while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
+ RX_RING_SIZE) > 0) {
entry = macb_rx_ring_wrap(bp->rx_prepared_head);
/* Make hw descriptor updates visible to CPU */
@@ -731,10 +728,10 @@ static void gem_rx_refill(struct macb *bp)
bp->rx_prepared_head++;
- if (bp->rx_skbuff[entry] == NULL) {
+ if (!bp->rx_skbuff[entry]) {
/* allocate sk_buff for this free entry in ring */
skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
- if (unlikely(skb == NULL)) {
+ if (unlikely(!skb)) {
netdev_err(bp->dev,
"Unable to allocate sk_buff\n");
break;
@@ -742,7 +739,8 @@ static void gem_rx_refill(struct macb *bp)
/* now fill corresponding descriptor entry */
paddr = dma_map_single(&bp->pdev->dev, skb->data,
- bp->rx_buffer_size, DMA_FROM_DEVICE);
+ bp->rx_buffer_size,
+ DMA_FROM_DEVICE);
if (dma_mapping_error(&bp->pdev->dev, paddr)) {
dev_kfree_skb(skb);
break;
@@ -767,7 +765,7 @@ static void gem_rx_refill(struct macb *bp)
wmb();
netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
- bp->rx_prepared_head, bp->rx_tail);
+ bp->rx_prepared_head, bp->rx_tail);
}
/* Mark DMA descriptors from begin up to and not including end as unused */
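The refill loop above runs while CIRC_SPACE() reports free slots between the prepared head and the consumer tail. For reference, the helper from <linux/circ_buf.h> it relies on (valid because RX_RING_SIZE is a power of two):

        /* free entries, leaving one slot open so a full ring can be
         * distinguished from an empty one */
        #define CIRC_SPACE(head, tail, size) \
                (((tail) - ((head) + 1)) & ((size) - 1))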
@@ -778,14 +776,14 @@ static void discard_partial_frame(struct macb *bp, unsigned int begin,
for (frag = begin; frag != end; frag++) {
struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
+
desc->addr &= ~MACB_BIT(RX_USED);
}
/* Make descriptor updates visible to hardware */
wmb();
- /*
- * When this happens, the hardware stats registers for
+ /* When this happens, the hardware stats registers for
* whatever caused this is updated, so we don't have to record
* anything.
*/
@@ -881,11 +879,10 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
len = desc->ctrl & bp->rx_frm_len_mask;
netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
- macb_rx_ring_wrap(first_frag),
- macb_rx_ring_wrap(last_frag), len);
+ macb_rx_ring_wrap(first_frag),
+ macb_rx_ring_wrap(last_frag), len);
- /*
- * The ethernet header starts NET_IP_ALIGN bytes into the
+ /* The ethernet header starts NET_IP_ALIGN bytes into the
* first buffer. Since the header is 14 bytes, this makes the
* payload word-aligned.
*
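The alignment argument in the comment above, made concrete (assuming the usual NET_IP_ALIGN of 2):

        skb_reserve(skb, NET_IP_ALIGN); /* data now starts at offset 2 */
        /* the Ethernet header occupies bytes 2..15 (14 bytes), so the
         * IP header begins at byte 16, a 4-byte boundary, and the
         * stack can load 32-bit IP fields without unaligned accesses */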
@@ -925,7 +922,8 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
frag_len = len - offset;
}
skb_copy_to_linear_data_offset(skb, offset,
- macb_rx_buffer(bp, frag), frag_len);
+ macb_rx_buffer(bp, frag),
+ frag_len);
offset += bp->rx_buffer_size;
desc = macb_rx_desc(bp, frag);
desc->addr &= ~MACB_BIT(RX_USED);
@@ -943,7 +941,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
bp->stats.rx_packets++;
bp->stats.rx_bytes += skb->len;
netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
- skb->len, skb->csum);
+ skb->len, skb->csum);
netif_receive_skb(skb);
return 0;
@@ -1050,7 +1048,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
work_done = 0;
netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
- (unsigned long)status, budget);
+ (unsigned long)status, budget);
work_done = bp->macbgem_ops.mog_rx(bp, budget);
if (work_done < budget) {
@@ -1100,8 +1098,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
(unsigned long)status);
if (status & MACB_RX_INT_FLAGS) {
- /*
- * There's no point taking any more interrupts
+ /* There's no point taking any more interrupts
* until we have processed the buffers. The
* scheduling call may fail if the poll routine
* is already scheduled, so disable interrupts
@@ -1130,8 +1127,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
if (status & MACB_BIT(TCOMP))
macb_tx_interrupt(queue);
- /*
- * Link change detection isn't possible with RMII, so we'll
+ /* Link change detection isn't possible with RMII, so we'll
* add that if/when we get our hands on a full-blown MII PHY.
*/
@@ -1162,8 +1158,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
}
if (status & MACB_BIT(HRESP)) {
- /*
- * TODO: Reset the hardware, and maybe move the
+ /* TODO: Reset the hardware, and maybe move the
* netdev_err to a lower-priority context as well
* (work queue?)
*/
@@ -1182,8 +1177,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling receive - used by netconsole and other diagnostic tools
+/* Polling receive - used by netconsole and other diagnostic tools
* to allow network i/o with interrupts disabled.
*/
static void macb_poll_controller(struct net_device *dev)
@@ -1269,7 +1263,7 @@ static unsigned int macb_tx_map(struct macb *bp,
}
/* Should never happen */
- if (unlikely(tx_skb == NULL)) {
+ if (unlikely(!tx_skb)) {
netdev_err(bp->dev, "BUG! empty skb!\n");
return 0;
}
@@ -1329,6 +1323,24 @@ dma_error:
return 0;
}
+static inline int macb_clear_csum(struct sk_buff *skb)
+{
+ /* no change for packets without checksum offloading */
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ /* make sure we can modify the header */
+ if (unlikely(skb_cow_head(skb, 0)))
+ return -1;
+
+ /* initialize checksum field
+ * This is required - at least for Zynq, which otherwise calculates
+ * wrong UDP header checksums for UDP packets with UDP data len <=2
+ */
+ *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
+ return 0;
+}
+
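Two details of the new helper are worth noting. skb_cow_head() makes the header private and writable before the store, so a cloned skb cannot have its checksum field clobbered behind another user's back, and the field is located generically instead of by parsing headers. The address computation, spelled out:

        /* skb_checksum_start() points at the area the hardware must
         * sum; csum_offset locates the 16-bit result field inside it */
        __sum16 *csum_field =
                (__sum16 *)(skb_checksum_start(skb) + skb->csum_offset);
        *csum_field = 0;        /* seed value the offload engine expects */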
static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
u16 queue_index = skb_get_queue_mapping(skb);
@@ -1339,16 +1351,16 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
#if defined(DEBUG) && defined(VERBOSE_DEBUG)
netdev_vdbg(bp->dev,
- "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
- queue_index, skb->len, skb->head, skb->data,
- skb_tail_pointer(skb), skb_end_pointer(skb));
+ "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
+ queue_index, skb->len, skb->head, skb->data,
+ skb_tail_pointer(skb), skb_end_pointer(skb));
print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
skb->data, 16, true);
#endif
/* Count how many TX buffer descriptors are needed to send this
* socket buffer: skb fragments of jumbo frames may need to be
- * splitted into many buffer descriptors.
+ * split into many buffer descriptors.
*/
count = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
nr_frags = skb_shinfo(skb)->nr_frags;
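A worked example of the descriptor count, with hypothetical sizes and assuming max_tx_length of 2048 for illustration:

        /* skb: 3000-byte linear head + one 5000-byte fragment */
        count  = DIV_ROUND_UP(3000, 2048);      /* 2 descriptors  */
        count += DIV_ROUND_UP(5000, 2048);      /* +3, total 5    */
        /* the space check that follows returns NETDEV_TX_BUSY
         * unless 5 ring slots are free */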
@@ -1368,6 +1380,11 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
+ if (macb_clear_csum(skb)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
/* Map socket buffer for DMA transfer */
if (!macb_tx_map(bp, queue, skb)) {
dev_kfree_skb_any(skb);
@@ -1399,8 +1416,8 @@ static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
netdev_dbg(bp->dev,
- "RX buffer must be multiple of %d bytes, expanding\n",
- RX_BUFFER_MULTIPLE);
+ "RX buffer must be multiple of %d bytes, expanding\n",
+ RX_BUFFER_MULTIPLE);
bp->rx_buffer_size =
roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
}
@@ -1423,7 +1440,7 @@ static void gem_free_rx_buffers(struct macb *bp)
for (i = 0; i < RX_RING_SIZE; i++) {
skb = bp->rx_skbuff[i];
- if (skb == NULL)
+ if (!skb)
continue;
desc = &bp->rx_ring[i];
@@ -1479,10 +1496,10 @@ static int gem_alloc_rx_buffers(struct macb *bp)
bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
if (!bp->rx_skbuff)
return -ENOMEM;
- else
- netdev_dbg(bp->dev,
- "Allocated %d RX struct sk_buff entries at %p\n",
- RX_RING_SIZE, bp->rx_skbuff);
+
+ netdev_dbg(bp->dev,
+ "Allocated %d RX struct sk_buff entries at %p\n",
+ RX_RING_SIZE, bp->rx_skbuff);
return 0;
}
@@ -1495,10 +1512,10 @@ static int macb_alloc_rx_buffers(struct macb *bp)
&bp->rx_buffers_dma, GFP_KERNEL);
if (!bp->rx_buffers)
return -ENOMEM;
- else
- netdev_dbg(bp->dev,
- "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
- size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
+
+ netdev_dbg(bp->dev,
+ "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
+ size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
return 0;
}
@@ -1589,8 +1606,7 @@ static void macb_reset_hw(struct macb *bp)
struct macb_queue *queue;
unsigned int q;
- /*
- * Disable RX and TX (XXX: Should we halt the transmission
+ /* Disable RX and TX (XXX: Should we halt the transmission
* more gracefully?)
*/
macb_writel(bp, NCR, 0);
@@ -1653,8 +1669,7 @@ static u32 macb_mdc_clk_div(struct macb *bp)
return config;
}
-/*
- * Get the DMA bus width field of the network configuration register that we
+/* Get the DMA bus width field of the network configuration register that we
* should program. We find the width from decoding the design configuration
* register to find the maximum supported data bus width.
*/
@@ -1674,8 +1689,7 @@ static u32 macb_dbw(struct macb *bp)
}
}
-/*
- * Configure the receive DMA engine
+/* Configure the receive DMA engine
* - use the correct receive buffer size
* - set best burst length for DMA operations
* (if not supported by FIFO, it will fallback to default)
@@ -1763,8 +1777,7 @@ static void macb_init_hw(struct macb *bp)
macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
}
-/*
- * The hash address register is 64 bits long and takes up two
+/* The hash address register is 64 bits long and takes up two
* locations in the memory map. The least significant bits are stored
* in EMAC_HSL and the most significant bits in EMAC_HSH.
*
@@ -1804,9 +1817,7 @@ static inline int hash_bit_value(int bitnr, __u8 *addr)
return 0;
}
-/*
- * Return the hash index value for the specified address.
- */
+/* Return the hash index value for the specified address. */
static int hash_get_index(__u8 *addr)
{
int i, j, bitval;
@@ -1822,9 +1833,7 @@ static int hash_get_index(__u8 *addr)
return hash_index;
}
-/*
- * Add multicast addresses to the internal multicast-hash table.
- */
+/* Add multicast addresses to the internal multicast-hash table. */
static void macb_sethashtable(struct net_device *dev)
{
struct netdev_hw_addr *ha;
@@ -1832,7 +1841,8 @@ static void macb_sethashtable(struct net_device *dev)
unsigned int bitnr;
struct macb *bp = netdev_priv(dev);
- mc_filter[0] = mc_filter[1] = 0;
+ mc_filter[0] = 0;
+ mc_filter[1] = 0;
netdev_for_each_mc_addr(ha, dev) {
bitnr = hash_get_index(ha->addr);
@@ -1843,9 +1853,7 @@ static void macb_sethashtable(struct net_device *dev)
macb_or_gem_writel(bp, HRT, mc_filter[1]);
}
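For context on the HRB/HRT writes above: hash_get_index() folds the 48-bit destination address down to the 6-bit position of one of the 64 hash-register bits. A minimal sketch of the fold, assuming the GEM convention that index bit i is the XOR of address bits i, i+6, ..., i+42:

        int i, j, bitval, hash_index = 0;

        for (i = 0; i < 6; i++) {       /* one index bit at a time */
                for (j = 0, bitval = 0; j < 8; j++)
                        bitval ^= hash_bit_value(i + j * 6, addr);
                hash_index |= bitval << i;
        }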
-/*
- * Enable/Disable promiscuous and multicast modes.
- */
+/* Enable/Disable promiscuous and multicast modes. */
static void macb_set_rx_mode(struct net_device *dev)
{
unsigned long cfg;
@@ -1900,7 +1908,7 @@ static int macb_open(struct net_device *dev)
netif_carrier_off(dev);
/* if the phy is not yet register, retry later*/
- if (!bp->phy_dev)
+ if (!dev->phydev)
return -EAGAIN;
/* RX buffers initialization */
@@ -1919,7 +1927,7 @@ static int macb_open(struct net_device *dev)
macb_init_hw(bp);
/* schedule a link state check */
- phy_start(bp->phy_dev);
+ phy_start(dev->phydev);
netif_tx_start_all_queues(dev);
@@ -1934,8 +1942,8 @@ static int macb_close(struct net_device *dev)
netif_tx_stop_all_queues(dev);
napi_disable(&bp->napi);
- if (bp->phy_dev)
- phy_stop(bp->phy_dev);
+ if (dev->phydev)
+ phy_stop(dev->phydev);
spin_lock_irqsave(&bp->lock, flags);
macb_reset_hw(bp);
@@ -2106,28 +2114,6 @@ static struct net_device_stats *macb_get_stats(struct net_device *dev)
return nstat;
}
-static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct macb *bp = netdev_priv(dev);
- struct phy_device *phydev = bp->phy_dev;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_ethtool_gset(phydev, cmd);
-}
-
-static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct macb *bp = netdev_priv(dev);
- struct phy_device *phydev = bp->phy_dev;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_ethtool_sset(phydev, cmd);
-}
-
static int macb_get_regs_len(struct net_device *netdev)
{
return MACB_GREGS_NBR * sizeof(u32);
@@ -2162,9 +2148,8 @@ static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
regs_buff[12] = macb_or_gem_readl(bp, USRIO);
- if (macb_is_gem(bp)) {
+ if (macb_is_gem(bp))
regs_buff[13] = gem_readl(bp, DMACFG);
- }
}
static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
@@ -2201,19 +2186,17 @@ static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
}
static const struct ethtool_ops macb_ethtool_ops = {
- .get_settings = macb_get_settings,
- .set_settings = macb_set_settings,
.get_regs_len = macb_get_regs_len,
.get_regs = macb_get_regs,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
.get_wol = macb_get_wol,
.set_wol = macb_set_wol,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static const struct ethtool_ops gem_ethtool_ops = {
- .get_settings = macb_get_settings,
- .set_settings = macb_set_settings,
.get_regs_len = macb_get_regs_len,
.get_regs = macb_get_regs,
.get_link = ethtool_op_get_link,
@@ -2221,12 +2204,13 @@ static const struct ethtool_ops gem_ethtool_ops = {
.get_ethtool_stats = gem_get_ethtool_stats,
.get_strings = gem_get_ethtool_strings,
.get_sset_count = gem_get_sset_count,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
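Both ops tables now delegate to the generic phylib helpers, which is what makes the dev->phydev migration elsewhere in this patch necessary: the helpers receive no driver context at all. A sketch of their shape (not the exact phylib source):

        int phy_ethtool_get_link_ksettings(struct net_device *ndev,
                                           struct ethtool_link_ksettings *cmd)
        {
                if (!ndev->phydev)
                        return -ENODEV; /* same check the removed
                                         * macb_get_settings made */
                return phy_ethtool_ksettings_get(ndev->phydev, cmd);
        }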
static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct macb *bp = netdev_priv(dev);
- struct phy_device *phydev = bp->phy_dev;
+ struct phy_device *phydev = dev->phydev;
if (!netif_running(dev))
return -EINVAL;
@@ -2287,11 +2271,11 @@ static const struct net_device_ops macb_netdev_ops = {
.ndo_set_features = macb_set_features,
};
-/*
- * Configure peripheral capabilities according to device tree
+/* Configure peripheral capabilities according to device tree
* and integration options used
*/
-static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_conf)
+static void macb_configure_caps(struct macb *bp,
+ const struct macb_config *dt_conf)
{
u32 dcfg;
@@ -2585,7 +2569,7 @@ static int at91ether_open(struct net_device *dev)
MACB_BIT(HRESP));
/* schedule a link state check */
- phy_start(lp->phy_dev);
+ phy_start(dev->phydev);
netif_start_queue(dev);
@@ -2989,7 +2973,7 @@ static int macb_probe(struct platform_device *pdev)
mac = of_get_mac_address(np);
if (mac)
- memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
+ ether_addr_copy(bp->dev->dev_addr, mac);
else
macb_get_hwaddr(bp);
@@ -2997,6 +2981,7 @@ static int macb_probe(struct platform_device *pdev)
phy_node = of_get_next_available_child(np, NULL);
if (phy_node) {
int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0);
+
if (gpio_is_valid(gpio)) {
bp->reset_gpio = gpio_to_desc(gpio);
gpiod_direction_output(bp->reset_gpio, 1);
@@ -3024,7 +3009,7 @@ static int macb_probe(struct platform_device *pdev)
if (err)
goto err_out_free_netdev;
- phydev = bp->phy_dev;
+ phydev = dev->phydev;
netif_carrier_off(dev);
@@ -3043,7 +3028,7 @@ static int macb_probe(struct platform_device *pdev)
return 0;
err_out_unregister_mdio:
- phy_disconnect(bp->phy_dev);
+ phy_disconnect(dev->phydev);
mdiobus_unregister(bp->mii_bus);
mdiobus_free(bp->mii_bus);
@@ -3071,8 +3056,8 @@ static int macb_remove(struct platform_device *pdev)
if (dev) {
bp = netdev_priv(dev);
- if (bp->phy_dev)
- phy_disconnect(bp->phy_dev);
+ if (dev->phydev)
+ phy_disconnect(dev->phydev);
mdiobus_unregister(bp->mii_bus);
mdiobus_free(bp->mii_bus);
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 8a13824ef802..b6fcf10621b6 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -403,11 +403,11 @@
#define MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII 0x00000004
#define MACB_CAPS_NO_GIGABIT_HALF 0x00000008
#define MACB_CAPS_USRIO_DISABLED 0x00000010
+#define MACB_CAPS_JUMBO 0x00000020
#define MACB_CAPS_FIFO_MODE 0x10000000
#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
#define MACB_CAPS_SG_DISABLED 0x40000000
#define MACB_CAPS_MACB_IS_GEM 0x80000000
-#define MACB_CAPS_JUMBO 0x00000010
/* Bit manipulation macros */
#define MACB_BIT(name) \
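The renumbering above is a bug fix rather than churn: MACB_CAPS_JUMBO had been given the same value as MACB_CAPS_USRIO_DISABLED, so testing either flag matched both:

        #define MACB_CAPS_USRIO_DISABLED 0x00000010
        #define MACB_CAPS_JUMBO          0x00000010     /* old: collides */
        /* (bp->caps & MACB_CAPS_JUMBO) was true on every
         * USRIO_DISABLED platform; 0x00000020 gives JUMBO its own bit */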
@@ -823,7 +823,6 @@ struct macb {
struct macb_or_gem_ops macbgem_ops;
struct mii_bus *mii_bus;
- struct phy_device *phy_dev;
int link;
int speed;
int duplex;
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
index 8ad7425f89bf..c03d37016a48 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.c
@@ -19,26 +19,16 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
-#include <linux/version.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
#include <linux/pci.h>
-#include <linux/kthread.h>
#include <linux/netdevice.h>
-#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
-#include "octeon_nic.h"
#include "octeon_main.h"
-#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
-#include "liquidio_image.h"
-#include "octeon_mem_ops.h"
int lio_cn6xxx_soft_reset(struct octeon_device *oct)
{
@@ -74,9 +64,9 @@ void lio_cn6xxx_enable_error_reporting(struct octeon_device *oct)
u32 val;
pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);
- if (val & 0x000f0000) {
+ if (val & 0x000c0000) {
dev_err(&oct->pci_dev->dev, "PCI-E Link error detected: 0x%08x\n",
- val & 0x000f0000);
+ val & 0x000c0000);
}
val |= 0xf; /* Enable Link error reporting */
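On the narrowed mask: assuming CN6XXX_PCIE_DEVCTL reads the standard PCIe capability dword with Device Control in the low half and Device Status in the high half, the status bits involved are:

        /* hypothetical named equivalents of the raw constants above */
        #define DEVSTA_CORR_ERR   (1 << 16)     /* correctable       */
        #define DEVSTA_NONFATAL   (1 << 17)     /* non-fatal         */
        #define DEVSTA_FATAL      (1 << 18)     /* fatal             */
        #define DEVSTA_UNSUP_REQ  (1 << 19)     /* unsupported req.  */
        /* 0x000f0000 = all four; 0x000c0000 = FATAL | UNSUP_REQ, so
         * correctable and non-fatal errors no longer log as link
         * errors, while the 0xf enable bits still report them */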
@@ -229,7 +219,7 @@ void lio_cn6xxx_setup_global_output_regs(struct octeon_device *oct)
/* / Select Packet count instead of bytes for SLI_PKTi_CNTS[CNT] */
octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_BMODE, 0);
- /* / Select ES,RO,NS setting from register for Output Queue Packet
+ /* Select ES, RO, NS setting from register for Output Queue Packet
* Address
*/
octeon_write_csr(oct, CN6XXX_SLI_PKT_DPADDR, 0xFFFFFFFF);
@@ -367,7 +357,8 @@ void lio_cn6xxx_enable_io_queues(struct octeon_device *oct)
void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
{
- u32 mask, i, loop = HZ;
+ int i;
+ u32 mask, loop = HZ;
u32 d32;
/* Reset the Enable bits for Input Queues. */
@@ -376,7 +367,7 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask);
/* Wait until hardware indicates that the queues are out of reset. */
- mask = oct->io_qmask.iq;
+ mask = (u32)oct->io_qmask.iq;
d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ);
while (((d32 & mask) != mask) && loop--) {
d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ);
@@ -384,8 +375,8 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
}
/* Reset the doorbell register for each Input queue. */
- for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
- if (!(oct->io_qmask.iq & (1UL << i)))
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.iq & (1ULL << i)))
continue;
octeon_write_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i), 0xFFFFFFFF);
d32 = octeon_read_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i));
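The 1UL-to-1ULL conversions throughout this file matter on 32-bit builds: io_qmask.iq and .oq are 64-bit masks, and unsigned long is 32 bits there, so shifting 1UL by a queue number of 32 or more is undefined. A sketch:

        u64 qmask = oct->io_qmask.iq;

        if (qmask & (1UL  << 40)) { }   /* undefined on 32-bit     */
        if (qmask & (1ULL << 40)) { }   /* well-defined everywhere */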
@@ -398,7 +389,7 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
/* Wait until hardware indicates that the queues are out of reset. */
loop = HZ;
- mask = oct->io_qmask.oq;
+ mask = (u32)oct->io_qmask.oq;
d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ);
while (((d32 & mask) != mask) && loop--) {
d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ);
@@ -408,8 +399,8 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
/* Reset the doorbell register for each Output queue. */
/* for (i = 0; i < oct->num_oqs; i++) { */
- for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
- if (!(oct->io_qmask.oq & (1UL << i)))
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.oq & (1ULL << i)))
continue;
octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i), 0xFFFFFFFF);
d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i));
@@ -429,16 +420,16 @@ void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
void lio_cn6xxx_reinit_regs(struct octeon_device *oct)
{
- u32 i;
+ int i;
- for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
- if (!(oct->io_qmask.iq & (1UL << i)))
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.iq & (1ULL << i)))
continue;
oct->fn_list.setup_iq_regs(oct, i);
}
- for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
- if (!(oct->io_qmask.oq & (1UL << i)))
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.oq & (1ULL << i)))
continue;
oct->fn_list.setup_oq_regs(oct, i);
}
@@ -450,8 +441,8 @@ void lio_cn6xxx_reinit_regs(struct octeon_device *oct)
oct->fn_list.enable_io_queues(oct);
/* for (i = 0; i < oct->num_oqs; i++) { */
- for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
- if (!(oct->io_qmask.oq & (1UL << i)))
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.oq & (1ULL << i)))
continue;
writel(oct->droq[i]->max_count, oct->droq[i]->pkts_credit_reg);
}
@@ -495,8 +486,7 @@ u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx)
}
u32
-lio_cn6xxx_update_read_index(struct octeon_device *oct __attribute__((unused)),
- struct octeon_instr_queue *iq)
+lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq)
{
u32 new_idx = readl(iq->inst_cnt_reg);
@@ -547,17 +537,18 @@ static void lio_cn6xxx_get_pcie_qlmport(struct octeon_device *oct)
dev_dbg(&oct->pci_dev->dev, "Using PCIE Port %d\n", oct->pcie_port);
}
-void
+static void
lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64)
{
dev_err(&oct->pci_dev->dev, "Error Intr: 0x%016llx\n",
CVM_CAST64(intr64));
}
-int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
+static int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
{
struct octeon_droq *droq;
- u32 oq_no, pkt_count, droq_time_mask, droq_mask, droq_int_enb;
+ int oq_no;
+ u32 pkt_count, droq_time_mask, droq_mask, droq_int_enb;
u32 droq_cnt_enb, droq_cnt_mask;
droq_cnt_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB);
@@ -573,12 +564,12 @@ int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
oct->droq_intr = 0;
/* for (oq_no = 0; oq_no < oct->num_oqs; oq_no++) { */
- for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES; oq_no++) {
- if (!(droq_mask & (1 << oq_no)))
+ for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct); oq_no++) {
+ if (!(droq_mask & (1ULL << oq_no)))
continue;
droq = oct->droq[oq_no];
- pkt_count = octeon_droq_check_hw_for_pkts(oct, droq);
+ pkt_count = octeon_droq_check_hw_for_pkts(droq);
if (pkt_count) {
oct->droq_intr |= (1ULL << oq_no);
if (droq->ops.poll_mode) {
diff --git a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
index f77918779355..28c47224221a 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn66xx_device.h
@@ -82,8 +82,6 @@ void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no);
void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no);
void lio_cn6xxx_enable_io_queues(struct octeon_device *oct);
void lio_cn6xxx_disable_io_queues(struct octeon_device *oct);
-void lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64);
-int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct);
irqreturn_t lio_cn6xxx_process_interrupt_regs(void *dev);
void lio_cn6xxx_reinit_regs(struct octeon_device *oct);
void lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct, u64 core_addr,
@@ -91,8 +89,7 @@ void lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct, u64 core_addr,
void lio_cn6xxx_bar1_idx_write(struct octeon_device *oct, u32 idx, u32 mask);
u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx);
u32
-lio_cn6xxx_update_read_index(struct octeon_device *oct __attribute__((unused)),
- struct octeon_instr_queue *iq);
+lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq);
void lio_cn6xxx_enable_interrupt(void *chip);
void lio_cn6xxx_disable_interrupt(void *chip);
void cn6xxx_get_pcie_qlmport(struct octeon_device *oct);
diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
index 8e830d0c0754..29755bc68f12 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.c
@@ -19,28 +19,17 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
-#include <linux/version.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
#include <linux/pci.h>
-#include <linux/kthread.h>
#include <linux/netdevice.h>
-#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
-#include "octeon_nic.h"
#include "octeon_main.h"
-#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_regs.h"
-#include "cn68xx_device.h"
-#include "liquidio_image.h"
-#include "octeon_mem_ops.h"
static void lio_cn68xx_set_dpi_regs(struct octeon_device *oct)
{
@@ -129,7 +118,7 @@ static inline void lio_cn68xx_vendor_message_fix(struct octeon_device *oct)
pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_FLTMSK, val);
}
-int lio_is_210nv(struct octeon_device *oct)
+static int lio_is_210nv(struct octeon_device *oct)
{
u64 mio_qlm4_cfg = lio_pci_readq(oct, CN6XXX_MIO_QLM4_CFG);
diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h
index d4e1c9fb0bf2..ea7bdcce6044 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_device.h
@@ -28,6 +28,5 @@
#define __CN68XX_DEVICE_H__
int lio_setup_cn68xx_octeon_device(struct octeon_device *oct);
-int lio_is_210nv(struct octeon_device *oct);
#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h b/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h
index 38cddbd107b6..d45a0f4aaf1f 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn68xx_regs.h
@@ -29,7 +29,6 @@
#ifndef __CN68XX_REGS_H__
#define __CN68XX_REGS_H__
-#include "cn66xx_regs.h"
/*###################### REQUEST QUEUE #########################*/
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
index 245c063ed4db..289eb8907922 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
@@ -19,13 +19,9 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
-#include <linux/version.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
-#include <linux/ethtool.h>
-#include <linux/dma-mapping.h>
#include <linux/pci.h>
-#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
@@ -36,9 +32,8 @@
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
-#include "cn68xx_regs.h"
-#include "cn68xx_device.h"
-#include "liquidio_image.h"
+
+static int octnet_get_link_stats(struct net_device *netdev);
struct oct_mdio_cmd_context {
int octeon_id;
@@ -71,34 +66,126 @@ enum {
INTERFACE_MODE_RXAUI,
INTERFACE_MODE_QSGMII,
INTERFACE_MODE_AGL,
+ INTERFACE_MODE_XLAUI,
+ INTERFACE_MODE_XFI,
+ INTERFACE_MODE_10G_KR,
+ INTERFACE_MODE_40G_KR4,
+ INTERFACE_MODE_MIXED,
};
#define ARRAY_LENGTH(a) (sizeof(a) / sizeof((a)[0]))
#define OCT_ETHTOOL_REGDUMP_LEN 4096
#define OCT_ETHTOOL_REGSVER 1
+/* statistics of PF */
+static const char oct_stats_strings[][ETH_GSTRING_LEN] = {
+ "rx_packets",
+ "tx_packets",
+ "rx_bytes",
+ "tx_bytes",
+ "rx_errors", /*jabber_err+l2_err+frame_err */
+ "tx_errors", /*fw_err_pko+fw_err_link+fw_err_drop */
+ "rx_dropped", /*st->fromwire.total_rcvd - st->fromwire.fw_total_rcvd
+ *+st->fromwire.dmac_drop + st->fromwire.fw_err_drop
+ */
+ "tx_dropped",
+
+ "tx_total_sent",
+ "tx_total_fwd",
+ "tx_err_pko",
+ "tx_err_link",
+ "tx_err_drop",
+
+ "tx_tso",
+ "tx_tso_packets",
+ "tx_tso_err",
+ "tx_vxlan",
+
+ "mac_tx_total_pkts",
+ "mac_tx_total_bytes",
+ "mac_tx_mcast_pkts",
+ "mac_tx_bcast_pkts",
+ "mac_tx_ctl_packets", /*oct->link_stats.fromhost.ctl_sent */
+ "mac_tx_total_collisions",
+ "mac_tx_one_collision",
+ "mac_tx_multi_collison",
+ "mac_tx_max_collision_fail",
+ "mac_tx_max_deferal_fail",
+ "mac_tx_fifo_err",
+ "mac_tx_runts",
+
+ "rx_total_rcvd",
+ "rx_total_fwd",
+ "rx_jabber_err",
+ "rx_l2_err",
+ "rx_frame_err",
+ "rx_err_pko",
+ "rx_err_link",
+ "rx_err_drop",
+
+ "rx_vxlan",
+ "rx_vxlan_err",
+
+ "rx_lro_pkts",
+ "rx_lro_bytes",
+ "rx_total_lro",
+
+ "rx_lro_aborts",
+ "rx_lro_aborts_port",
+ "rx_lro_aborts_seq",
+ "rx_lro_aborts_tsval",
+ "rx_lro_aborts_timer",
+ "rx_fwd_rate",
+
+ "mac_rx_total_rcvd",
+ "mac_rx_bytes",
+ "mac_rx_total_bcst",
+ "mac_rx_total_mcst",
+ "mac_rx_runts",
+ "mac_rx_ctl_packets",
+ "mac_rx_fifo_err",
+ "mac_rx_dma_drop",
+ "mac_rx_fcs_err",
+
+ "link_state_changes",
+};
+
+/* statistics of host tx queue */
static const char oct_iq_stats_strings[][ETH_GSTRING_LEN] = {
- "Instr posted",
- "Instr processed",
- "Instr dropped",
- "Bytes Sent",
- "Sgentry_sent",
- "Inst cntreg",
- "Tx done",
- "Tx Iq busy",
- "Tx dropped",
- "Tx bytes",
+ "packets", /*oct->instr_queue[iq_no]->stats.tx_done*/
+ "bytes", /*oct->instr_queue[iq_no]->stats.tx_tot_bytes*/
+ "dropped",
+ "iq_busy",
+ "sgentry_sent",
+
+ "fw_instr_posted",
+ "fw_instr_processed",
+ "fw_instr_dropped",
+ "fw_bytes_sent",
+
+ "tso",
+ "vxlan",
+ "txq_restart",
};
+/* statistics of host rx queue */
static const char oct_droq_stats_strings[][ETH_GSTRING_LEN] = {
- "OQ Pkts Received",
- "OQ Bytes Received",
- "Dropped no dispatch",
- "Dropped nomem",
- "Dropped toomany",
- "Stack RX cnt",
- "Stack RX Bytes",
- "RX dropped",
+ "packets", /*oct->droq[oq_no]->stats.rx_pkts_received */
+ "bytes", /*oct->droq[oq_no]->stats.rx_bytes_received */
+ "dropped", /*oct->droq[oq_no]->stats.rx_dropped+
+ *oct->droq[oq_no]->stats.dropped_nodispatch+
+ *oct->droq[oq_no]->stats.dropped_toomany+
+ *oct->droq[oq_no]->stats.dropped_nomem
+ */
+ "dropped_nomem",
+ "dropped_toomany",
+ "fw_dropped",
+ "fw_pkts_received",
+ "fw_bytes_received",
+ "fw_dropped_nodispatch",
+
+ "vxlan",
+ "buffer_alloc_failure",
};
#define OCTNIC_NCMD_AUTONEG_ON 0x1
@@ -112,8 +199,9 @@ static int lio_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
linfo = &lio->linfo;
- if (linfo->link.s.interface == INTERFACE_MODE_XAUI ||
- linfo->link.s.interface == INTERFACE_MODE_RXAUI) {
+ if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
+ linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
+ linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
ecmd->port = PORT_FIBRE;
ecmd->supported =
(SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE |
@@ -124,10 +212,11 @@ static int lio_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ecmd->autoneg = AUTONEG_DISABLE;
} else {
- dev_err(&oct->pci_dev->dev, "Unknown link interface reported\n");
+ dev_err(&oct->pci_dev->dev, "Unknown link interface reported %d\n",
+ linfo->link.s.if_mode);
}
- if (linfo->link.s.status) {
+ if (linfo->link.s.link_up) {
ethtool_cmd_speed_set(ecmd, linfo->link.s.speed);
ecmd->duplex = linfo->link.s.duplex;
} else {
@@ -222,23 +311,20 @@ static int octnet_gpio_access(struct net_device *netdev, int addr, int val)
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
struct octnic_ctrl_pkt nctrl;
- struct octnic_ctrl_params nparams;
int ret = 0;
memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
nctrl.ncmd.u64 = 0;
nctrl.ncmd.s.cmd = OCTNET_CMD_GPIO_ACCESS;
- nctrl.ncmd.s.param1 = lio->linfo.ifidx;
- nctrl.ncmd.s.param2 = addr;
- nctrl.ncmd.s.param3 = val;
+ nctrl.ncmd.s.param1 = addr;
+ nctrl.ncmd.s.param2 = val;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
nctrl.wait_time = 100;
nctrl.netpndev = (u64)netdev;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
- nparams.resp_order = OCTEON_RESP_ORDERED;
-
- ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
if (ret < 0) {
dev_err(&oct->pci_dev->dev, "Failed to configure gpio value\n");
return -EINVAL;
@@ -253,20 +339,18 @@ static void octnet_mdio_resp_callback(struct octeon_device *oct,
u32 status,
void *buf)
{
- struct oct_mdio_cmd_resp *mdio_cmd_rsp;
struct oct_mdio_cmd_context *mdio_cmd_ctx;
struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
- mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
mdio_cmd_ctx = (struct oct_mdio_cmd_context *)sc->ctxptr;
oct = lio_get_device(mdio_cmd_ctx->octeon_id);
if (status) {
dev_err(&oct->pci_dev->dev, "MIDO instruction failed. Status: %llx\n",
CVM_CAST64(status));
- ACCESS_ONCE(mdio_cmd_ctx->cond) = -1;
+ WRITE_ONCE(mdio_cmd_ctx->cond, -1);
} else {
- ACCESS_ONCE(mdio_cmd_ctx->cond) = 1;
+ WRITE_ONCE(mdio_cmd_ctx->cond, 1);
}
wake_up_interruptible(&mdio_cmd_ctx->wc);
}
@@ -297,15 +381,16 @@ octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
mdio_cmd_rsp = (struct oct_mdio_cmd_resp *)sc->virtrptr;
mdio_cmd = (struct oct_mdio_cmd *)sc->virtdptr;
- ACCESS_ONCE(mdio_cmd_ctx->cond) = 0;
+ WRITE_ONCE(mdio_cmd_ctx->cond, 0);
mdio_cmd_ctx->octeon_id = lio_get_device_id(oct_dev);
mdio_cmd->op = op;
mdio_cmd->mdio_addr = loc;
if (op)
mdio_cmd->value1 = *value;
- mdio_cmd->value2 = lio->linfo.ifidx;
octeon_swap_8B_data((u64 *)mdio_cmd, sizeof(struct oct_mdio_cmd) / 8);
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC, OPCODE_NIC_MDIO45,
0, 0, 0);
@@ -317,7 +402,7 @@ octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
retval = octeon_send_soft_command(oct_dev, sc);
- if (retval) {
+ if (retval == IQ_SEND_FAILED) {
dev_err(&oct_dev->pci_dev->dev,
"octnet_mdio45_access instruction failed status: %x\n",
retval);
@@ -335,7 +420,7 @@ octnet_mdio45_access(struct lio *lio, int op, int loc, int *value)
octeon_swap_8B_data((u64 *)(&mdio_cmd_rsp->resp),
sizeof(struct oct_mdio_cmd) / 8);
- if (ACCESS_ONCE(mdio_cmd_ctx->cond) == 1) {
+ if (READ_ONCE(mdio_cmd_ctx->cond) == 1) {
if (!op)
*value = mdio_cmd_rsp->resp.value1;
} else {
@@ -379,18 +464,16 @@ static int lio_set_phys_id(struct net_device *netdev,
/* Configure Beacon values */
value = LIO68XX_LED_BEACON_CFGON;
- ret =
- octnet_mdio45_access(lio, 1,
- LIO68XX_LED_BEACON_ADDR,
- &value);
+ ret = octnet_mdio45_access(lio, 1,
+ LIO68XX_LED_BEACON_ADDR,
+ &value);
if (ret)
return ret;
value = LIO68XX_LED_CTRL_CFGON;
- ret =
- octnet_mdio45_access(lio, 1,
- LIO68XX_LED_CTRL_ADDR,
- &value);
+ ret = octnet_mdio45_access(lio, 1,
+ LIO68XX_LED_CTRL_ADDR,
+ &value);
if (ret)
return ret;
} else {
@@ -469,7 +552,7 @@ lio_ethtool_get_ringparam(struct net_device *netdev,
tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
}
- if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE) {
+ if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE - OCTNET_FRM_HEADER_SIZE) {
ering->rx_pending = 0;
ering->rx_max_pending = 0;
ering->rx_mini_pending = 0;
@@ -503,10 +586,10 @@ static void lio_set_msglevel(struct net_device *netdev, u32 msglvl)
if ((msglvl ^ lio->msg_enable) & NETIF_MSG_HW) {
if (msglvl & NETIF_MSG_HW)
liquidio_set_feature(netdev,
- OCTNET_CMD_VERBOSE_ENABLE);
+ OCTNET_CMD_VERBOSE_ENABLE, 0);
else
liquidio_set_feature(netdev,
- OCTNET_CMD_VERBOSE_DISABLE);
+ OCTNET_CMD_VERBOSE_DISABLE, 0);
}
lio->msg_enable = msglvl;
@@ -518,61 +601,279 @@ lio_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
/* Notes: Not supporting any auto negotiation in these
* drivers. Just report pause frame support.
*/
- pause->tx_pause = 1;
- pause->rx_pause = 1; /* TODO: Need to support RX pause frame!!. */
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+
+ pause->autoneg = 0;
+
+ pause->tx_pause = oct->tx_pause;
+ pause->rx_pause = oct->rx_pause;
}
static void
lio_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats, u64 *data)
+ struct ethtool_stats *stats __attribute__((unused)),
+ u64 *data)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct_dev = lio->oct_dev;
+ struct net_device_stats *netstats = &netdev->stats;
int i = 0, j;
- for (j = 0; j < MAX_OCTEON_INSTR_QUEUES; j++) {
- if (!(oct_dev->io_qmask.iq & (1UL << j)))
+ netdev->netdev_ops->ndo_get_stats(netdev);
+ octnet_get_link_stats(netdev);
+
+ /*sum of oct->droq[oq_no]->stats->rx_pkts_received */
+ data[i++] = CVM_CAST64(netstats->rx_packets);
+ /*sum of oct->instr_queue[iq_no]->stats.tx_done */
+ data[i++] = CVM_CAST64(netstats->tx_packets);
+ /*sum of oct->droq[oq_no]->stats->rx_bytes_received */
+ data[i++] = CVM_CAST64(netstats->rx_bytes);
+ /*sum of oct->instr_queue[iq_no]->stats.tx_tot_bytes */
+ data[i++] = CVM_CAST64(netstats->tx_bytes);
+ data[i++] = CVM_CAST64(netstats->rx_errors);
+ data[i++] = CVM_CAST64(netstats->tx_errors);
+ /*sum of oct->droq[oq_no]->stats->rx_dropped +
+ *oct->droq[oq_no]->stats->dropped_nodispatch +
+ *oct->droq[oq_no]->stats->dropped_toomany +
+ *oct->droq[oq_no]->stats->dropped_nomem
+ */
+ data[i++] = CVM_CAST64(netstats->rx_dropped);
+ /*sum of oct->instr_queue[iq_no]->stats.tx_dropped */
+ data[i++] = CVM_CAST64(netstats->tx_dropped);
+
+ /*data[i++] = CVM_CAST64(stats->multicast); */
+ /*data[i++] = CVM_CAST64(stats->collisions); */
+
+ /* firmware tx stats */
+ /*per_core_stats[cvmx_get_core_num()].link_stats[mdata->from_ifidx].
+ *fromhost.fw_total_sent
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_sent);
+ /*per_core_stats[i].link_stats[port].fromhost.fw_total_fwd */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_total_fwd);
+ /*per_core_stats[j].link_stats[i].fromhost.fw_err_pko */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_pko);
+ /*per_core_stats[j].link_stats[i].fromhost.fw_err_link */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_link);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
+ *fw_err_drop
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_drop);
+
+ /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.fw_tso */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
+ *fw_tso_fwd
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tso_fwd);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
+ *fw_err_tso
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_err_tso);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[idx].fromhost.
+ *fw_tx_vxlan
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fw_tx_vxlan);
+
+ /* mac tx statistics */
+ /*CVMX_BGXX_CMRX_TX_STAT5 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_pkts_sent);
+ /*CVMX_BGXX_CMRX_TX_STAT4 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_bytes_sent);
+ /*CVMX_BGXX_CMRX_TX_STAT15 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.mcast_pkts_sent);
+ /*CVMX_BGXX_CMRX_TX_STAT14 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.bcast_pkts_sent);
+ /*CVMX_BGXX_CMRX_TX_STAT17 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.ctl_sent);
+ /*CVMX_BGXX_CMRX_TX_STAT0 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.total_collisions);
+ /*CVMX_BGXX_CMRX_TX_STAT3 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.one_collision_sent);
+ /*CVMX_BGXX_CMRX_TX_STAT2 */
+ data[i++] =
+ CVM_CAST64(oct_dev->link_stats.fromhost.multi_collision_sent);
+ /*CVMX_BGXX_CMRX_TX_STAT0 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_collision_fail);
+ /*CVMX_BGXX_CMRX_TX_STAT1 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.max_deferral_fail);
+ /*CVMX_BGXX_CMRX_TX_STAT16 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.fifo_err);
+ /*CVMX_BGXX_CMRX_TX_STAT6 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromhost.runts);
+
+ /* RX firmware stats */
+ /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
+ *fw_total_rcvd
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_rcvd);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
+ *fw_total_fwd
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_fwd);
+ /*per_core_stats[core_id].link_stats[ifidx].fromwire.jabber_err */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.jabber_err);
+ /*per_core_stats[core_id].link_stats[ifidx].fromwire.l2_err */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.l2_err);
+ /*per_core_stats[core_id].link_stats[ifidx].fromwire.frame_err */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.frame_err);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
+ *fw_err_pko
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_pko);
+ /*per_core_stats[j].link_stats[i].fromwire.fw_err_link */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_link);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
+ *fromwire.fw_err_drop
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_err_drop);
+
+ /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
+ *fromwire.fw_rx_vxlan
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[lro_ctx->ifidx].
+ *fromwire.fw_rx_vxlan_err
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_rx_vxlan_err);
+
+ /* LRO */
+ /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
+ *fw_lro_pkts
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_pkts);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
+ *fw_lro_octs
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_octs);
+ /*per_core_stats[j].link_stats[i].fromwire.fw_total_lro */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_total_lro);
+ /*per_core_stats[j].link_stats[i].fromwire.fw_lro_aborts */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
+ *fw_lro_aborts_port
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_port);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
+ *fw_lro_aborts_seq
+ */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_seq);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
+ *fw_lro_aborts_tsval
+ */
+ data[i++] =
+ CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_tsval);
+ /*per_core_stats[cvmx_get_core_num()].link_stats[ifidx].fromwire.
+ *fw_lro_aborts_timer
+ */
+ data[i++] =
+ CVM_CAST64(oct_dev->link_stats.fromwire.fw_lro_aborts_timer);
+ /* intrmod: packet forward rate */
+ /*per_core_stats[j].link_stats[i].fromwire.fwd_rate */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fwd_rate);
+
+ /* mac: link-level stats */
+ /*CVMX_BGXX_CMRX_RX_STAT0 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_rcvd);
+ /*CVMX_BGXX_CMRX_RX_STAT1 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.bytes_rcvd);
+ /*CVMX_PKI_STATX_STAT5 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_bcst);
+ /*CVMX_PKI_STATX_STAT5 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.total_mcst);
+ /*wqe->word2.err_code or wqe->word2.err_level */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.runts);
+ /*CVMX_BGXX_CMRX_RX_STAT2 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.ctl_rcvd);
+ /*CVMX_BGXX_CMRX_RX_STAT6 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fifo_err);
+ /*CVMX_BGXX_CMRX_RX_STAT4 */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.dmac_drop);
+ /*wqe->word2.err_code or wqe->word2.err_level */
+ data[i++] = CVM_CAST64(oct_dev->link_stats.fromwire.fcs_err);
+ /*lio->link_changes*/
+ data[i++] = CVM_CAST64(lio->link_changes);
+
+ /* TX -- lio_update_stats(lio); */
+ for (j = 0; j < MAX_OCTEON_INSTR_QUEUES(oct_dev); j++) {
+ if (!(oct_dev->io_qmask.iq & (1ULL << j)))
continue;
+ /*packets to network port*/
+ /*# of packets tx to network */
+ data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
+ /*# of bytes tx to network */
data[i++] =
- CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
- data[i++] =
- CVM_CAST64(
- oct_dev->instr_queue[j]->stats.instr_processed);
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes);
+ /*# of packets dropped */
data[i++] =
- CVM_CAST64(
- oct_dev->instr_queue[j]->stats.instr_dropped);
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped);
+ /*# of tx fails due to queue full */
data[i++] =
- CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy);
+ /*XXX gather entries sent */
data[i++] =
CVM_CAST64(oct_dev->instr_queue[j]->stats.sgentry_sent);
+
+ /*instruction to firmware: data and control */
+ /*# of instructions to the queue */
data[i++] =
- readl(oct_dev->instr_queue[j]->inst_cnt_reg);
- data[i++] =
- CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_done);
- data[i++] =
- CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_iq_busy);
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.instr_posted);
+ /*# of instructions processed */
+ data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->
+ stats.instr_processed);
+ /*# of instructions could not be processed */
+ data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->
+ stats.instr_dropped);
+ /*bytes sent through the queue */
data[i++] =
- CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_dropped);
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.bytes_sent);
+
+ /*tso request*/
+ data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_gso);
+ /*vxlan request*/
+ data[i++] = CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_vxlan);
+ /*txq restart*/
data[i++] =
- CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_tot_bytes);
+ CVM_CAST64(oct_dev->instr_queue[j]->stats.tx_restart);
}
- /* for (j = 0; j < oct_dev->num_oqs; j++){ */
- for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES; j++) {
- if (!(oct_dev->io_qmask.oq & (1UL << j)))
+ /* RX */
+ /* for (j = 0; j < oct_dev->num_oqs; j++) { */
+ for (j = 0; j < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); j++) {
+ if (!(oct_dev->io_qmask.oq & (1ULL << j)))
continue;
- data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
- data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
- data[i++] =
- CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
- data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
- data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
+
+ /*packets send to TCP/IP network stack */
+ /*# of packets to network stack */
data[i++] =
CVM_CAST64(oct_dev->droq[j]->stats.rx_pkts_received);
+ /*# of bytes to network stack */
data[i++] =
CVM_CAST64(oct_dev->droq[j]->stats.rx_bytes_received);
+ /*# of packets dropped */
+ data[i++] = CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem +
+ oct_dev->droq[j]->stats.dropped_toomany +
+ oct_dev->droq[j]->stats.rx_dropped);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.dropped_nomem);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.dropped_toomany);
data[i++] =
CVM_CAST64(oct_dev->droq[j]->stats.rx_dropped);
+
+ /*control and data path*/
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.pkts_received);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.bytes_received);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.dropped_nodispatch);
+
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.rx_vxlan);
+ data[i++] =
+ CVM_CAST64(oct_dev->droq[j]->stats.rx_alloc_failure);
}
}
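Everything this function emits is positional: ethtool pairs the i-th string from lio_get_strings() with data[i], so the fill order here must follow oct_stats_strings[], then the per-IQ and per-OQ string tables, entry for entry. Conceptually, user space does no more than (hypothetical sketch):

        /* how `ethtool -S` renders the result */
        for (i = 0; i < n_stats; i++)
                printf("%s: %llu\n", strings[i],
                       (unsigned long long)data[i]);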
@@ -581,26 +882,43 @@ static void lio_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct_dev = lio->oct_dev;
int num_iq_stats, num_oq_stats, i, j;
+ int num_stats;
- num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
- for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
- if (!(oct_dev->io_qmask.iq & (1UL << i)))
- continue;
- for (j = 0; j < num_iq_stats; j++) {
- sprintf(data, "IQ%d %s", i, oct_iq_stats_strings[j]);
+ switch (stringset) {
+ case ETH_SS_STATS:
+ num_stats = ARRAY_SIZE(oct_stats_strings);
+ for (j = 0; j < num_stats; j++) {
+ sprintf(data, "%s", oct_stats_strings[j]);
data += ETH_GSTRING_LEN;
}
- }
- num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
- /* for (i = 0; i < oct_dev->num_oqs; i++) { */
- for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
- if (!(oct_dev->io_qmask.oq & (1UL << i)))
- continue;
- for (j = 0; j < num_oq_stats; j++) {
- sprintf(data, "OQ%d %s", i, oct_droq_stats_strings[j]);
- data += ETH_GSTRING_LEN;
+ num_iq_stats = ARRAY_SIZE(oct_iq_stats_strings);
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct_dev); i++) {
+ if (!(oct_dev->io_qmask.iq & (1ULL << i)))
+ continue;
+ for (j = 0; j < num_iq_stats; j++) {
+ sprintf(data, "tx-%d-%s", i,
+ oct_iq_stats_strings[j]);
+ data += ETH_GSTRING_LEN;
+ }
}
+
+ num_oq_stats = ARRAY_SIZE(oct_droq_stats_strings);
+ /* for (i = 0; i < oct_dev->num_oqs; i++) { */
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct_dev); i++) {
+ if (!(oct_dev->io_qmask.oq & (1ULL << i)))
+ continue;
+ for (j = 0; j < num_oq_stats; j++) {
+ sprintf(data, "rx-%d-%s", i,
+ oct_droq_stats_strings[j]);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+ break;
+
+ default:
+ netif_info(lio, drv, lio->netdev, "Unknown Stringset !!\n");
+ break;
}
}
@@ -609,8 +927,14 @@ static int lio_get_sset_count(struct net_device *netdev, int sset)
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct_dev = lio->oct_dev;
- return (ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs) +
- (ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
+ switch (sset) {
+ case ETH_SS_STATS:
+ return (ARRAY_SIZE(oct_stats_strings) +
+ ARRAY_SIZE(oct_iq_stats_strings) * oct_dev->num_iqs +
+ ARRAY_SIZE(oct_droq_stats_strings) * oct_dev->num_oqs);
+ default:
+ return -EOPNOTSUPP;
+ }
}
static int lio_get_intr_coalesce(struct net_device *netdev,
@@ -618,50 +942,49 @@ static int lio_get_intr_coalesce(struct net_device *netdev,
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
- struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
struct octeon_instr_queue *iq;
struct oct_intrmod_cfg *intrmod_cfg;
intrmod_cfg = &oct->intrmod;
switch (oct->chip_id) {
- /* case OCTEON_CN73XX: Todo */
- /* break; */
case OCTEON_CN68XX:
- case OCTEON_CN66XX:
- if (!intrmod_cfg->intrmod_enable) {
+ case OCTEON_CN66XX: {
+ struct octeon_cn6xxx *cn6xxx =
+ (struct octeon_cn6xxx *)oct->chip;
+
+ if (!intrmod_cfg->rx_enable) {
intr_coal->rx_coalesce_usecs =
CFG_GET_OQ_INTR_TIME(cn6xxx->conf);
intr_coal->rx_max_coalesced_frames =
CFG_GET_OQ_INTR_PKT(cn6xxx->conf);
- } else {
- intr_coal->use_adaptive_rx_coalesce =
- intrmod_cfg->intrmod_enable;
- intr_coal->rate_sample_interval =
- intrmod_cfg->intrmod_check_intrvl;
- intr_coal->pkt_rate_high =
- intrmod_cfg->intrmod_maxpkt_ratethr;
- intr_coal->pkt_rate_low =
- intrmod_cfg->intrmod_minpkt_ratethr;
- intr_coal->rx_max_coalesced_frames_high =
- intrmod_cfg->intrmod_maxcnt_trigger;
- intr_coal->rx_coalesce_usecs_high =
- intrmod_cfg->intrmod_maxtmr_trigger;
- intr_coal->rx_coalesce_usecs_low =
- intrmod_cfg->intrmod_mintmr_trigger;
- intr_coal->rx_max_coalesced_frames_low =
- intrmod_cfg->intrmod_mincnt_trigger;
}
-
- iq = oct->instr_queue[lio->linfo.txpciq[0]];
+ iq = oct->instr_queue[lio->linfo.txpciq[0].s.q_no];
intr_coal->tx_max_coalesced_frames = iq->fill_threshold;
break;
-
+ }
default:
netif_info(lio, drv, lio->netdev, "Unknown Chip !!\n");
return -EINVAL;
}
-
+ if (intrmod_cfg->rx_enable) {
+ intr_coal->use_adaptive_rx_coalesce =
+ intrmod_cfg->rx_enable;
+ intr_coal->rate_sample_interval =
+ intrmod_cfg->check_intrvl;
+ intr_coal->pkt_rate_high =
+ intrmod_cfg->maxpkt_ratethr;
+ intr_coal->pkt_rate_low =
+ intrmod_cfg->minpkt_ratethr;
+ intr_coal->rx_max_coalesced_frames_high =
+ intrmod_cfg->rx_maxcnt_trigger;
+ intr_coal->rx_coalesce_usecs_high =
+ intrmod_cfg->rx_maxtmr_trigger;
+ intr_coal->rx_coalesce_usecs_low =
+ intrmod_cfg->rx_mintmr_trigger;
+ intr_coal->rx_max_coalesced_frames_low =
+ intrmod_cfg->rx_mincnt_trigger;
+ }
return 0;
}
@@ -681,19 +1004,20 @@ static void octnet_intrmod_callback(struct octeon_device *oct_dev,
else
dev_info(&oct_dev->pci_dev->dev,
"Rx-Adaptive Interrupt moderation enabled:%llx\n",
- oct_dev->intrmod.intrmod_enable);
+ oct_dev->intrmod.rx_enable);
octeon_free_soft_command(oct_dev, sc);
}
/* Configure interrupt moderation parameters */
-static int octnet_set_intrmod_cfg(void *oct, struct oct_intrmod_cfg *intr_cfg)
+static int octnet_set_intrmod_cfg(struct lio *lio,
+ struct oct_intrmod_cfg *intr_cfg)
{
struct octeon_soft_command *sc;
struct oct_intrmod_cmd *cmd;
struct oct_intrmod_cfg *cfg;
int retval;
- struct octeon_device *oct_dev = (struct octeon_device *)oct;
+ struct octeon_device *oct_dev = lio->oct_dev;
/* Alloc soft command */
sc = (struct octeon_soft_command *)
@@ -714,6 +1038,8 @@ static int octnet_set_intrmod_cfg(void *oct, struct oct_intrmod_cfg *intr_cfg)
cmd->cfg = cfg;
cmd->oct_dev = oct_dev;
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
OPCODE_NIC_INTRMOD_CFG, 0, 0, 0);
@@ -722,17 +1048,171 @@ static int octnet_set_intrmod_cfg(void *oct, struct oct_intrmod_cfg *intr_cfg)
sc->wait_time = 1000;
retval = octeon_send_soft_command(oct_dev, sc);
- if (retval) {
+ if (retval == IQ_SEND_FAILED) {
+ octeon_free_soft_command(oct_dev, sc);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+octnet_nic_stats_callback(struct octeon_device *oct_dev,
+ u32 status, void *ptr)
+{
+ struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
+ struct oct_nic_stats_resp *resp = (struct oct_nic_stats_resp *)
+ sc->virtrptr;
+ struct oct_nic_stats_ctrl *ctrl = (struct oct_nic_stats_ctrl *)
+ sc->ctxptr;
+ struct nic_rx_stats *rsp_rstats = &resp->stats.fromwire;
+ struct nic_tx_stats *rsp_tstats = &resp->stats.fromhost;
+
+ struct nic_rx_stats *rstats = &oct_dev->link_stats.fromwire;
+ struct nic_tx_stats *tstats = &oct_dev->link_stats.fromhost;
+
+ if ((status != OCTEON_REQUEST_TIMEOUT) && !resp->status) {
+ octeon_swap_8B_data((u64 *)&resp->stats,
+ (sizeof(struct oct_link_stats)) >> 3);
+
+ /* RX link-level stats */
+ rstats->total_rcvd = rsp_rstats->total_rcvd;
+ rstats->bytes_rcvd = rsp_rstats->bytes_rcvd;
+ rstats->total_bcst = rsp_rstats->total_bcst;
+ rstats->total_mcst = rsp_rstats->total_mcst;
+ rstats->runts = rsp_rstats->runts;
+ rstats->ctl_rcvd = rsp_rstats->ctl_rcvd;
+ /* Accounts for over/under-run of buffers */
+ rstats->fifo_err = rsp_rstats->fifo_err;
+ rstats->dmac_drop = rsp_rstats->dmac_drop;
+ rstats->fcs_err = rsp_rstats->fcs_err;
+ rstats->jabber_err = rsp_rstats->jabber_err;
+ rstats->l2_err = rsp_rstats->l2_err;
+ rstats->frame_err = rsp_rstats->frame_err;
+
+ /* RX firmware stats */
+ rstats->fw_total_rcvd = rsp_rstats->fw_total_rcvd;
+ rstats->fw_total_fwd = rsp_rstats->fw_total_fwd;
+ rstats->fw_err_pko = rsp_rstats->fw_err_pko;
+ rstats->fw_err_link = rsp_rstats->fw_err_link;
+ rstats->fw_err_drop = rsp_rstats->fw_err_drop;
+ rstats->fw_rx_vxlan = rsp_rstats->fw_rx_vxlan;
+ rstats->fw_rx_vxlan_err = rsp_rstats->fw_rx_vxlan_err;
+
+ /* Number of packets that are LROed */
+ rstats->fw_lro_pkts = rsp_rstats->fw_lro_pkts;
+ /* Number of octets that are LROed */
+ rstats->fw_lro_octs = rsp_rstats->fw_lro_octs;
+ /* Number of LRO packets formed */
+ rstats->fw_total_lro = rsp_rstats->fw_total_lro;
+ /* Number of times LRO of a packet was aborted */
+ rstats->fw_lro_aborts = rsp_rstats->fw_lro_aborts;
+ rstats->fw_lro_aborts_port = rsp_rstats->fw_lro_aborts_port;
+ rstats->fw_lro_aborts_seq = rsp_rstats->fw_lro_aborts_seq;
+ rstats->fw_lro_aborts_tsval = rsp_rstats->fw_lro_aborts_tsval;
+ rstats->fw_lro_aborts_timer = rsp_rstats->fw_lro_aborts_timer;
+ /* intrmod: packet forward rate */
+ rstats->fwd_rate = rsp_rstats->fwd_rate;
+
+ /* TX link-level stats */
+ tstats->total_pkts_sent = rsp_tstats->total_pkts_sent;
+ tstats->total_bytes_sent = rsp_tstats->total_bytes_sent;
+ tstats->mcast_pkts_sent = rsp_tstats->mcast_pkts_sent;
+ tstats->bcast_pkts_sent = rsp_tstats->bcast_pkts_sent;
+ tstats->ctl_sent = rsp_tstats->ctl_sent;
+ /* Packets sent after one collision */
+ tstats->one_collision_sent = rsp_tstats->one_collision_sent;
+ /* Packets sent after multiple collisions */
+ tstats->multi_collision_sent = rsp_tstats->multi_collision_sent;
+ /* Packets not sent due to max collisions */
+ tstats->max_collision_fail = rsp_tstats->max_collision_fail;
+ /* Packets not sent due to max deferrals */
+ tstats->max_deferral_fail = rsp_tstats->max_deferral_fail;
+ /* Accounts for over/under-run of buffers */
+ tstats->fifo_err = rsp_tstats->fifo_err;
+ tstats->runts = rsp_tstats->runts;
+ /* Total number of collisions detected */
+ tstats->total_collisions = rsp_tstats->total_collisions;
+
+ /* firmware stats */
+ tstats->fw_total_sent = rsp_tstats->fw_total_sent;
+ tstats->fw_total_fwd = rsp_tstats->fw_total_fwd;
+ tstats->fw_err_pko = rsp_tstats->fw_err_pko;
+ tstats->fw_err_link = rsp_tstats->fw_err_link;
+ tstats->fw_err_drop = rsp_tstats->fw_err_drop;
+ tstats->fw_tso = rsp_tstats->fw_tso;
+ tstats->fw_tso_fwd = rsp_tstats->fw_tso_fwd;
+ tstats->fw_err_tso = rsp_tstats->fw_err_tso;
+ tstats->fw_tx_vxlan = rsp_tstats->fw_tx_vxlan;
+
+ resp->status = 1;
+ } else {
+ resp->status = -1;
+ }
+ complete(&ctrl->complete);
+}
+
+/* Fetch link-level and firmware statistics from the NIC firmware */
+static int octnet_get_link_stats(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct_dev = lio->oct_dev;
+
+ struct octeon_soft_command *sc;
+ struct oct_nic_stats_ctrl *ctrl;
+ struct oct_nic_stats_resp *resp;
+
+ int retval;
+
+ /* Alloc soft command */
+ sc = (struct octeon_soft_command *)
+ octeon_alloc_soft_command(oct_dev,
+ 0,
+ sizeof(struct oct_nic_stats_resp),
+ sizeof(struct octnic_ctrl_pkt));
+
+ if (!sc)
+ return -ENOMEM;
+
+ resp = (struct oct_nic_stats_resp *)sc->virtrptr;
+ memset(resp, 0, sizeof(struct oct_nic_stats_resp));
+
+ ctrl = (struct oct_nic_stats_ctrl *)sc->ctxptr;
+ memset(ctrl, 0, sizeof(struct oct_nic_stats_ctrl));
+ ctrl->netdev = netdev;
+ init_completion(&ctrl->complete);
+
+ sc->iq_no = lio->linfo.txpciq[0].s.q_no;
+
+ octeon_prepare_soft_command(oct_dev, sc, OPCODE_NIC,
+ OPCODE_NIC_PORT_STATS, 0, 0, 0);
+
+ sc->callback = octnet_nic_stats_callback;
+ sc->callback_arg = sc;
+ sc->wait_time = 500; /* in milliseconds */
+
+ retval = octeon_send_soft_command(oct_dev, sc);
+ if (retval == IQ_SEND_FAILED) {
+ octeon_free_soft_command(oct_dev, sc);
+ return -EINVAL;
+ }
+
+ wait_for_completion_timeout(&ctrl->complete, msecs_to_jiffies(1000));
+
+ if (resp->status != 1) {
octeon_free_soft_command(oct_dev, sc);
+
return -EINVAL;
}
+ octeon_free_soft_command(oct_dev, sc);
+
return 0;
}
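
The send-then-wait rendezvous above (init_completion() in the request path, complete() in the response callback, wait_for_completion_timeout() with a 1000 ms bound) can be mirrored in portable C with a condition variable. This is only an analogue of the kernel API, not driver code:

#include <pthread.h>
#include <stdio.h>
#include <time.h>

/* Portable analogue of the kernel pattern used above:
 *   init_completion()             -> flag + mutex + condvar
 *   complete()                    -> set flag, signal
 *   wait_for_completion_timeout() -> pthread_cond_timedwait() loop
 */
struct completion {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        int done;
};

/* Stand-in for octnet_nic_stats_callback() firing later. */
static void *firmware_reply(void *arg)
{
        struct completion *c = arg;

        pthread_mutex_lock(&c->lock);
        c->done = 1;
        pthread_cond_signal(&c->cond);
        pthread_mutex_unlock(&c->lock);
        return NULL;
}

int main(void)
{
        struct completion c = {
                PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0
        };
        struct timespec deadline;
        pthread_t t;
        int rc = 0;

        pthread_create(&t, NULL, firmware_reply, &c);

        /* Mirrors the msecs_to_jiffies(1000) bound in the wait above. */
        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += 1;

        pthread_mutex_lock(&c.lock);
        while (!c.done && rc == 0)
                rc = pthread_cond_timedwait(&c.cond, &c.lock, &deadline);
        pthread_mutex_unlock(&c.lock);

        printf("%s\n", rc ? "timed out" : "completed");
        pthread_join(t, NULL);
        return 0;
}

As in the callback above, the waiter re-checks the done flag in a loop so a spurious wakeup is not mistaken for a firmware reply.
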
/* Enable/Disable auto interrupt Moderation */
static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce
- *intr_coal, int adaptive)
+ *intr_coal)
{
int ret = 0;
struct octeon_device *oct = lio->oct_dev;
@@ -740,59 +1220,73 @@ static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce
intrmod_cfg = &oct->intrmod;
- if (adaptive) {
+ if (oct->intrmod.rx_enable || oct->intrmod.tx_enable) {
if (intr_coal->rate_sample_interval)
- intrmod_cfg->intrmod_check_intrvl =
+ intrmod_cfg->check_intrvl =
intr_coal->rate_sample_interval;
else
- intrmod_cfg->intrmod_check_intrvl =
+ intrmod_cfg->check_intrvl =
LIO_INTRMOD_CHECK_INTERVAL;
if (intr_coal->pkt_rate_high)
- intrmod_cfg->intrmod_maxpkt_ratethr =
+ intrmod_cfg->maxpkt_ratethr =
intr_coal->pkt_rate_high;
else
- intrmod_cfg->intrmod_maxpkt_ratethr =
+ intrmod_cfg->maxpkt_ratethr =
LIO_INTRMOD_MAXPKT_RATETHR;
if (intr_coal->pkt_rate_low)
- intrmod_cfg->intrmod_minpkt_ratethr =
+ intrmod_cfg->minpkt_ratethr =
intr_coal->pkt_rate_low;
else
- intrmod_cfg->intrmod_minpkt_ratethr =
+ intrmod_cfg->minpkt_ratethr =
LIO_INTRMOD_MINPKT_RATETHR;
-
+ }
+ if (oct->intrmod.rx_enable) {
if (intr_coal->rx_max_coalesced_frames_high)
- intrmod_cfg->intrmod_maxcnt_trigger =
+ intrmod_cfg->rx_maxcnt_trigger =
intr_coal->rx_max_coalesced_frames_high;
else
- intrmod_cfg->intrmod_maxcnt_trigger =
- LIO_INTRMOD_MAXCNT_TRIGGER;
+ intrmod_cfg->rx_maxcnt_trigger =
+ LIO_INTRMOD_RXMAXCNT_TRIGGER;
if (intr_coal->rx_coalesce_usecs_high)
- intrmod_cfg->intrmod_maxtmr_trigger =
+ intrmod_cfg->rx_maxtmr_trigger =
intr_coal->rx_coalesce_usecs_high;
else
- intrmod_cfg->intrmod_maxtmr_trigger =
- LIO_INTRMOD_MAXTMR_TRIGGER;
+ intrmod_cfg->rx_maxtmr_trigger =
+ LIO_INTRMOD_RXMAXTMR_TRIGGER;
if (intr_coal->rx_coalesce_usecs_low)
- intrmod_cfg->intrmod_mintmr_trigger =
+ intrmod_cfg->rx_mintmr_trigger =
intr_coal->rx_coalesce_usecs_low;
else
- intrmod_cfg->intrmod_mintmr_trigger =
- LIO_INTRMOD_MINTMR_TRIGGER;
+ intrmod_cfg->rx_mintmr_trigger =
+ LIO_INTRMOD_RXMINTMR_TRIGGER;
if (intr_coal->rx_max_coalesced_frames_low)
- intrmod_cfg->intrmod_mincnt_trigger =
+ intrmod_cfg->rx_mincnt_trigger =
intr_coal->rx_max_coalesced_frames_low;
else
- intrmod_cfg->intrmod_mincnt_trigger =
- LIO_INTRMOD_MINCNT_TRIGGER;
+ intrmod_cfg->rx_mincnt_trigger =
+ LIO_INTRMOD_RXMINCNT_TRIGGER;
+ }
+ if (oct->intrmod.tx_enable) {
+ if (intr_coal->tx_max_coalesced_frames_high)
+ intrmod_cfg->tx_maxcnt_trigger =
+ intr_coal->tx_max_coalesced_frames_high;
+ else
+ intrmod_cfg->tx_maxcnt_trigger =
+ LIO_INTRMOD_TXMAXCNT_TRIGGER;
+ if (intr_coal->tx_max_coalesced_frames_low)
+ intrmod_cfg->tx_mincnt_trigger =
+ intr_coal->tx_max_coalesced_frames_low;
+ else
+ intrmod_cfg->tx_mincnt_trigger =
+ LIO_INTRMOD_TXMINCNT_TRIGGER;
}
- intrmod_cfg->intrmod_enable = adaptive;
- ret = octnet_set_intrmod_cfg(oct, intrmod_cfg);
+ ret = octnet_set_intrmod_cfg(lio, intrmod_cfg);
return ret;
}
@@ -800,54 +1294,82 @@ static int oct_cfg_adaptive_intr(struct lio *lio, struct ethtool_coalesce
static int
oct_cfg_rx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal)
{
- int ret;
struct octeon_device *oct = lio->oct_dev;
- struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
u32 rx_max_coalesced_frames;
- if (!intr_coal->rx_max_coalesced_frames)
- rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT;
- else
- rx_max_coalesced_frames = intr_coal->rx_max_coalesced_frames;
-
- /* Disable adaptive interrupt modulation */
- ret = oct_cfg_adaptive_intr(lio, intr_coal, 0);
- if (ret)
- return ret;
-
/* Config Cnt based interrupt values */
- octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
- rx_max_coalesced_frames);
- CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames);
+ switch (oct->chip_id) {
+ case OCTEON_CN68XX:
+ case OCTEON_CN66XX: {
+ struct octeon_cn6xxx *cn6xxx =
+ (struct octeon_cn6xxx *)oct->chip;
+
+ if (!intr_coal->rx_max_coalesced_frames)
+ rx_max_coalesced_frames = CN6XXX_OQ_INTR_PKT;
+ else
+ rx_max_coalesced_frames =
+ intr_coal->rx_max_coalesced_frames;
+ octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
+ rx_max_coalesced_frames);
+ CFG_SET_OQ_INTR_PKT(cn6xxx->conf, rx_max_coalesced_frames);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
return 0;
}
static int oct_cfg_rx_intrtime(struct lio *lio, struct ethtool_coalesce
*intr_coal)
{
- int ret;
struct octeon_device *oct = lio->oct_dev;
- struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
u32 time_threshold, rx_coalesce_usecs;
- if (!intr_coal->rx_coalesce_usecs)
- rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME;
- else
- rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
+ /* Config Time based interrupt values */
+ switch (oct->chip_id) {
+ case OCTEON_CN68XX:
+ case OCTEON_CN66XX: {
+ struct octeon_cn6xxx *cn6xxx =
+ (struct octeon_cn6xxx *)oct->chip;
+ if (!intr_coal->rx_coalesce_usecs)
+ rx_coalesce_usecs = CN6XXX_OQ_INTR_TIME;
+ else
+ rx_coalesce_usecs = intr_coal->rx_coalesce_usecs;
- /* Disable adaptive interrupt modulation */
- ret = oct_cfg_adaptive_intr(lio, intr_coal, 0);
- if (ret)
- return ret;
+ time_threshold = lio_cn6xxx_get_oq_ticks(oct,
+ rx_coalesce_usecs);
+ octeon_write_csr(oct,
+ CN6XXX_SLI_OQ_INT_LEVEL_TIME,
+ time_threshold);
- /* Config Time based interrupt values */
- time_threshold = lio_cn6xxx_get_oq_ticks(oct, rx_coalesce_usecs);
- octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_TIME, time_threshold);
- CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs);
+ CFG_SET_OQ_INTR_TIME(cn6xxx->conf, rx_coalesce_usecs);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
return 0;
}
+static int
+oct_cfg_tx_intrcnt(struct lio *lio, struct ethtool_coalesce *intr_coal
+ __attribute__((unused)))
+{
+ struct octeon_device *oct = lio->oct_dev;
+
+ /* Config Cnt based interrupt values */
+ switch (oct->chip_id) {
+ case OCTEON_CN68XX:
+ case OCTEON_CN66XX:
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int lio_set_intr_coalesce(struct net_device *netdev,
struct ethtool_coalesce *intr_coal)
{
@@ -855,59 +1377,48 @@ static int lio_set_intr_coalesce(struct net_device *netdev,
int ret;
struct octeon_device *oct = lio->oct_dev;
u32 j, q_no;
+ int db_max, db_min;
- if ((intr_coal->tx_max_coalesced_frames >= CN6XXX_DB_MIN) &&
- (intr_coal->tx_max_coalesced_frames <= CN6XXX_DB_MAX)) {
- for (j = 0; j < lio->linfo.num_txpciq; j++) {
- q_no = lio->linfo.txpciq[j];
- oct->instr_queue[q_no]->fill_threshold =
- intr_coal->tx_max_coalesced_frames;
+ switch (oct->chip_id) {
+ case OCTEON_CN68XX:
+ case OCTEON_CN66XX:
+ db_min = CN6XXX_DB_MIN;
+ db_max = CN6XXX_DB_MAX;
+ if ((intr_coal->tx_max_coalesced_frames >= db_min) &&
+ (intr_coal->tx_max_coalesced_frames <= db_max)) {
+ for (j = 0; j < lio->linfo.num_txpciq; j++) {
+ q_no = lio->linfo.txpciq[j].s.q_no;
+ oct->instr_queue[q_no]->fill_threshold =
+ intr_coal->tx_max_coalesced_frames;
+ }
+ } else {
+ dev_err(&oct->pci_dev->dev,
+ "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
+ intr_coal->tx_max_coalesced_frames, db_min,
+ db_max);
+ return -EINVAL;
}
- } else {
- dev_err(&oct->pci_dev->dev,
- "LIQUIDIO: Invalid tx-frames:%d. Range is min:%d max:%d\n",
- intr_coal->tx_max_coalesced_frames, CN6XXX_DB_MIN,
- CN6XXX_DB_MAX);
+ break;
+ default:
return -EINVAL;
}
- /* User requested adaptive-rx on */
- if (intr_coal->use_adaptive_rx_coalesce) {
- ret = oct_cfg_adaptive_intr(lio, intr_coal, 1);
- if (ret)
- goto ret_intrmod;
- }
+ oct->intrmod.rx_enable = intr_coal->use_adaptive_rx_coalesce ? 1 : 0;
+ oct->intrmod.tx_enable = intr_coal->use_adaptive_tx_coalesce ? 1 : 0;
- /* User requested adaptive-rx off and rx coalesce */
- if ((intr_coal->rx_coalesce_usecs) &&
- (!intr_coal->use_adaptive_rx_coalesce)) {
+ ret = oct_cfg_adaptive_intr(lio, intr_coal);
+
+ if (!intr_coal->use_adaptive_rx_coalesce) {
ret = oct_cfg_rx_intrtime(lio, intr_coal);
if (ret)
goto ret_intrmod;
- }
- /* User requested adaptive-rx off and rx coalesce */
- if ((intr_coal->rx_max_coalesced_frames) &&
- (!intr_coal->use_adaptive_rx_coalesce)) {
ret = oct_cfg_rx_intrcnt(lio, intr_coal);
if (ret)
goto ret_intrmod;
}
-
- /* User requested adaptive-rx off, so use default coalesce params */
- if ((!intr_coal->rx_max_coalesced_frames) &&
- (!intr_coal->use_adaptive_rx_coalesce) &&
- (!intr_coal->rx_coalesce_usecs)) {
- dev_info(&oct->pci_dev->dev,
- "Turning off adaptive-rx interrupt moderation\n");
- dev_info(&oct->pci_dev->dev,
- "Using RX Coalesce Default values rx_coalesce_usecs:%d rx_max_coalesced_frames:%d\n",
- CN6XXX_OQ_INTR_TIME, CN6XXX_OQ_INTR_PKT);
- ret = oct_cfg_rx_intrtime(lio, intr_coal);
- if (ret)
- goto ret_intrmod;
-
- ret = oct_cfg_rx_intrcnt(lio, intr_coal);
+ if (!intr_coal->use_adaptive_tx_coalesce) {
+ ret = oct_cfg_tx_intrcnt(lio, intr_coal);
if (ret)
goto ret_intrmod;
}
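
From userspace this set path is exercised as a read-modify-write through ETHTOOL_GCOALESCE/ETHTOOL_SCOALESCE, since the whole structure is handed to lio_set_intr_coalesce() at once. A sketch, again assuming an interface named "eth0":

#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        struct ethtool_coalesce ec = { .cmd = ETHTOOL_GCOALESCE };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* assumed name */
        ifr.ifr_data = (char *)&ec;

        if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
                perror("ETHTOOL_GCOALESCE");
                return 1;
        }

        /* Read-modify-write: enable adaptive RX, leave the rest intact.
         * With the patch above this lands in oct_cfg_adaptive_intr(). */
        ec.cmd = ETHTOOL_SCOALESCE;
        ec.use_adaptive_rx_coalesce = 1;
        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
                perror("ETHTOOL_SCOALESCE");
                return 1;
        }
        close(fd);
        return 0;
}

Starting from a GCOALESCE snapshot matters: a zeroed structure would either fall back to driver defaults or fail the tx-frames range check above.
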
@@ -923,23 +1434,28 @@ static int lio_get_ts_info(struct net_device *netdev,
struct lio *lio = GET_LIO(netdev);
info->so_timestamping =
+#ifdef PTP_HARDWARE_TIMESTAMPING
SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+#endif
SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RAW_HARDWARE;
+ SOF_TIMESTAMPING_SOFTWARE;
if (lio->ptp_clock)
info->phc_index = ptp_clock_index(lio->ptp_clock);
else
info->phc_index = -1;
+#ifdef PTP_HARDWARE_TIMESTAMPING
info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
+#endif
return 0;
}
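
Applications discover these capabilities with the ETHTOOL_GET_TS_INFO ioctl before attempting hardware timestamping. A minimal sketch, with the interface name assumed:

#include <linux/ethtool.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        struct ethtool_ts_info info = { .cmd = ETHTOOL_GET_TS_INFO };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* assumed name */
        ifr.ifr_data = (char *)&info;

        if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
                perror("ETHTOOL_GET_TS_INFO");
                return 1;
        }
        printf("phc_index: %d\n", info.phc_index); /* -1 => no PHC */
        printf("hw tx timestamping: %s\n",
               (info.so_timestamping & SOF_TIMESTAMPING_TX_HARDWARE) ?
               "yes" : "no");
        close(fd);
        return 0;
}
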
@@ -950,7 +1466,6 @@ static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
struct octeon_device *oct = lio->oct_dev;
struct oct_link_info *linfo;
struct octnic_ctrl_pkt nctrl;
- struct octnic_ctrl_params nparams;
int ret = 0;
/* get the link info */
@@ -965,12 +1480,14 @@ static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ecmd->duplex != DUPLEX_FULL)))
return -EINVAL;
- /* Ethtool Support is not provided for XAUI and RXAUI Interfaces
+ /* Ethtool Support is not provided for XAUI, RXAUI, and XFI Interfaces
* as they operate at fixed Speed and Duplex settings
*/
- if (linfo->link.s.interface == INTERFACE_MODE_XAUI ||
- linfo->link.s.interface == INTERFACE_MODE_RXAUI) {
- dev_info(&oct->pci_dev->dev, "XAUI IFs settings cannot be modified.\n");
+ if (linfo->link.s.if_mode == INTERFACE_MODE_XAUI ||
+ linfo->link.s.if_mode == INTERFACE_MODE_RXAUI ||
+ linfo->link.s.if_mode == INTERFACE_MODE_XFI) {
+ dev_info(&oct->pci_dev->dev,
+ "Autonegotiation, duplex and speed settings cannot be modified.\n");
return -EINVAL;
}
@@ -978,9 +1495,9 @@ static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
nctrl.ncmd.u64 = 0;
nctrl.ncmd.s.cmd = OCTNET_CMD_SET_SETTINGS;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
nctrl.wait_time = 1000;
nctrl.netpndev = (u64)netdev;
- nctrl.ncmd.s.param1 = lio->linfo.ifidx;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
/* Passing the parameters sent by ethtool like Speed, Autoneg & Duplex
@@ -990,19 +1507,17 @@ static int lio_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
/* Autoneg ON */
nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON |
OCTNIC_NCMD_AUTONEG_ON;
- nctrl.ncmd.s.param2 = ecmd->advertising;
+ nctrl.ncmd.s.param1 = ecmd->advertising;
} else {
/* Autoneg OFF */
nctrl.ncmd.s.more = OCTNIC_NCMD_PHY_ON;
- nctrl.ncmd.s.param3 = ecmd->duplex;
+ nctrl.ncmd.s.param2 = ecmd->duplex;
- nctrl.ncmd.s.param2 = ecmd->speed;
+ nctrl.ncmd.s.param1 = ecmd->speed;
}
- nparams.resp_order = OCTEON_RESP_ORDERED;
-
- ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
if (ret < 0) {
dev_err(&oct->pci_dev->dev, "Failed to set settings\n");
return -1;
@@ -1026,7 +1541,7 @@ static int lio_nway_reset(struct net_device *netdev)
}
/* Return register dump len. */
-static int lio_get_regs_len(struct net_device *dev)
+static int lio_get_regs_len(struct net_device *dev __attribute__((unused)))
{
return OCT_ETHTOOL_REGDUMP_LEN;
}
@@ -1170,13 +1685,12 @@ static void lio_get_regs(struct net_device *dev,
int len = 0;
struct octeon_device *oct = lio->oct_dev;
- memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
regs->version = OCT_ETHTOOL_REGSVER;
switch (oct->chip_id) {
- /* case OCTEON_CN73XX: Todo */
case OCTEON_CN68XX:
case OCTEON_CN66XX:
+ memset(regbuf, 0, OCT_ETHTOOL_REGDUMP_LEN);
len += cn6xxx_read_csr_reg(regbuf + len, oct);
len += cn6xxx_read_config_reg(regbuf + len, oct);
break;
@@ -1186,6 +1700,23 @@ static void lio_get_regs(struct net_device *dev,
}
}
+static u32 lio_get_priv_flags(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ return lio->oct_dev->priv_flags;
+}
+
+static int lio_set_priv_flags(struct net_device *netdev, u32 flags)
+{
+ struct lio *lio = GET_LIO(netdev);
+ bool intr_by_tx_bytes = !!(flags & (0x1 << OCT_PRIV_FLAG_TX_BYTES));
+
+ lio_set_priv_flag(lio->oct_dev, OCT_PRIV_FLAG_TX_BYTES,
+ intr_by_tx_bytes);
+ return 0;
+}
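
Private flags travel as a single u32 bitmap via ETHTOOL_GPFLAGS/ETHTOOL_SPFLAGS, with bit positions matching the driver's flag strings. The sketch below toggles bit 0; whether that bit is OCT_PRIV_FLAG_TX_BYTES depends on the driver's flag ordering, so treat the bit position as an assumption:

#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
        struct ethtool_value ev = { .cmd = ETHTOOL_GPFLAGS };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1); /* assumed name */
        ifr.ifr_data = (char *)&ev;

        if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
                perror("ETHTOOL_GPFLAGS");
                return 1;
        }

        /* Flip bit 0 of the bitmap; bit meaning is driver-defined. */
        ev.cmd = ETHTOOL_SPFLAGS;
        ev.data ^= 0x1;
        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                perror("ETHTOOL_SPFLAGS");
        close(fd);
        return 0;
}
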
+
static const struct ethtool_ops lio_ethtool_ops = {
.get_settings = lio_get_settings,
.get_link = ethtool_op_get_link,
@@ -1207,6 +1738,8 @@ static const struct ethtool_ops lio_ethtool_ops = {
.set_settings = lio_set_settings,
.get_coalesce = lio_get_intr_coalesce,
.set_coalesce = lio_set_intr_coalesce,
+ .get_priv_flags = lio_get_priv_flags,
+ .set_priv_flags = lio_set_priv_flags,
.get_ts_info = lio_get_ts_info,
};
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 34d269cd5579..20d6942edf40 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -20,24 +20,12 @@
* Contact Cavium, Inc. for more information
**********************************************************************/
#include <linux/version.h>
-#include <linux/module.h>
-#include <linux/crc32.h>
-#include <linux/dma-mapping.h>
#include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/ip.h>
-#include <net/ip.h>
-#include <linux/ipv6.h>
#include <linux/net_tstamp.h>
#include <linux/if_vlan.h>
#include <linux/firmware.h>
-#include <linux/ethtool.h>
#include <linux/ptp_clock_kernel.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/workqueue.h>
-#include <linux/interrupt.h>
-#include "octeon_config.h"
+#include <net/vxlan.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
@@ -48,7 +36,6 @@
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
-#include "cn68xx_regs.h"
#include "cn68xx_device.h"
#include "liquidio_image.h"
@@ -72,6 +59,9 @@ MODULE_PARM_DESC(console_bitmask,
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)
+#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count) \
+ (octeon_dev_ptr->instr_queue[iq_no]->stats.field += count)
+
static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
@@ -84,6 +74,8 @@ static int conf_type;
module_param(conf_type, int, 0);
MODULE_PARM_DESC(conf_type, "select octeon configuration 0 default 1 ovs");
+static int ptp_enable = 1;
+
/* Bit mask values for lio->ifstate */
#define LIO_IFSTATE_DROQ_OPS 0x01
#define LIO_IFSTATE_REGISTERED 0x02
@@ -166,6 +158,8 @@ struct octnic_gather {
* received from the IP layer.
*/
struct octeon_sg_entry *sg;
+
+ u64 sg_dma_ptr;
};
/** This structure is used by NIC driver to store information required
@@ -220,8 +214,8 @@ static void octeon_droq_bh(unsigned long pdev)
(struct octeon_device_priv *)oct->priv;
/* for (q_no = 0; q_no < oct->num_oqs; q_no++) { */
- for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES; q_no++) {
- if (!(oct->io_qmask.oq & (1UL << q_no)))
+ for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
+ if (!(oct->io_qmask.oq & (1ULL << q_no)))
continue;
reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
MAX_PACKET_BUDGET);
@@ -241,11 +235,10 @@ static int lio_wait_for_oq_pkts(struct octeon_device *oct)
do {
pending_pkts = 0;
- for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
- if (!(oct->io_qmask.oq & (1UL << i)))
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.oq & (1ULL << i)))
continue;
- pkt_cnt += octeon_droq_check_hw_for_pkts(oct,
- oct->droq[i]);
+ pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
}
if (pkt_cnt > 0) {
pending_pkts += pkt_cnt;
@@ -361,7 +354,7 @@ static int wait_for_pending_requests(struct octeon_device *oct)
[OCTEON_ORDERED_SC_LIST].pending_req_count);
if (pcount)
schedule_timeout_uninterruptible(HZ / 10);
- else
+ else
break;
}
@@ -392,10 +385,10 @@ static inline void pcierror_quiesce_device(struct octeon_device *oct)
dev_err(&oct->pci_dev->dev, "There were pending requests\n");
/* Force all requests waiting to be fetched by OCTEON to complete. */
- for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
struct octeon_instr_queue *iq;
- if (!(oct->io_qmask.iq & (1UL << i)))
+ if (!(oct->io_qmask.iq & (1ULL << i)))
continue;
iq = oct->instr_queue[i];
@@ -405,7 +398,7 @@ static inline void pcierror_quiesce_device(struct octeon_device *oct)
iq->octeon_read_index = iq->host_write_index;
iq->stats.instr_processed +=
atomic_read(&iq->instr_pending);
- lio_process_iq_request_list(oct, iq);
+ lio_process_iq_request_list(oct, iq, 0);
spin_unlock_bh(&iq->lock);
}
}
@@ -500,7 +493,8 @@ static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
* \brief mmio handler
* @param pdev Pointer to PCI device
*/
-static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev *pdev)
+static pci_ers_result_t liquidio_pcie_mmio_enabled(
+ struct pci_dev *pdev __attribute__((unused)))
{
/* We should never hit this since we never ask for a reset for a Fatal
* Error. We always return DISCONNECT in io_error above.
@@ -516,7 +510,8 @@ static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev *pdev)
* Restart the card from scratch, as if from a cold-boot. Implementation
* resembles the first-half of the octeon_resume routine.
*/
-static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev *pdev)
+static pci_ers_result_t liquidio_pcie_slot_reset(
+ struct pci_dev *pdev __attribute__((unused)))
{
/* We should never hit this since we never ask for a reset for a Fatal
* Error. We always return DISCONNECT in io_error above.
@@ -533,7 +528,7 @@ static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev *pdev)
* its OK to resume normal operation. Implementation resembles the
* second-half of the octeon_resume routine.
*/
-static void liquidio_pcie_resume(struct pci_dev *pdev)
+static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused)))
{
/* Nothing to be done here. */
}
@@ -544,7 +539,8 @@ static void liquidio_pcie_resume(struct pci_dev *pdev)
* @param pdev Pointer to PCI device
* @param state state to suspend to
*/
-static int liquidio_suspend(struct pci_dev *pdev, pm_message_t state)
+static int liquidio_suspend(struct pci_dev *pdev __attribute__((unused)),
+ pm_message_t state __attribute__((unused)))
{
return 0;
}
@@ -553,7 +549,7 @@ static int liquidio_suspend(struct pci_dev *pdev, pm_message_t state)
* \brief called when resuming
* @param pdev Pointer to PCI device
*/
-static int liquidio_resume(struct pci_dev *pdev)
+static int liquidio_resume(struct pci_dev *pdev __attribute__((unused)))
{
return 0;
}
@@ -678,12 +674,24 @@ static inline void txqs_start(struct net_device *netdev)
*/
static inline void txqs_wake(struct net_device *netdev)
{
+ struct lio *lio = GET_LIO(netdev);
+
if (netif_is_multiqueue(netdev)) {
int i;
- for (i = 0; i < netdev->num_tx_queues; i++)
- netif_wake_subqueue(netdev, i);
+ for (i = 0; i < netdev->num_tx_queues; i++) {
+ int qno = lio->linfo.txpciq[i %
+ (lio->linfo.num_txpciq)].s.q_no;
+
+ if (__netif_subqueue_stopped(netdev, i)) {
+ INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
+ tx_restart, 1);
+ netif_wake_subqueue(netdev, i);
+ }
+ }
} else {
+ INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
+ tx_restart, 1);
netif_wake_queue(netdev);
}
}
@@ -705,7 +713,7 @@ static void start_txq(struct net_device *netdev)
{
struct lio *lio = GET_LIO(netdev);
- if (lio->linfo.link.s.status) {
+ if (lio->linfo.link.s.link_up) {
txqs_start(netdev);
return;
}
@@ -752,16 +760,23 @@ static inline int check_txq_status(struct lio *lio)
/* check each sub-queue state */
for (q = 0; q < numqs; q++) {
- iq = lio->linfo.txpciq[q & (lio->linfo.num_txpciq - 1)];
+ iq = lio->linfo.txpciq[q %
+ (lio->linfo.num_txpciq)].s.q_no;
if (octnet_iq_is_full(lio->oct_dev, iq))
continue;
- wake_q(lio->netdev, q);
- ret_val++;
+ if (__netif_subqueue_stopped(lio->netdev, q)) {
+ wake_q(lio->netdev, q);
+ INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
+ tx_restart, 1);
+ ret_val++;
+ }
}
} else {
if (octnet_iq_is_full(lio->oct_dev, lio->txq))
return 0;
wake_q(lio->netdev, lio->txq);
+ INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
+ tx_restart, 1);
ret_val = 1;
}
return ret_val;
@@ -787,64 +802,116 @@ static inline struct list_head *list_delete_head(struct list_head *root)
}
/**
- * \brief Delete gather list
+ * \brief Delete gather lists
* @param lio per-network private data
*/
-static void delete_glist(struct lio *lio)
+static void delete_glists(struct lio *lio)
{
struct octnic_gather *g;
+ int i;
- do {
- g = (struct octnic_gather *)
- list_delete_head(&lio->glist);
- if (g) {
- if (g->sg)
- kfree((void *)((unsigned long)g->sg -
- g->adjust));
- kfree(g);
- }
- } while (g);
+ if (!lio->glist)
+ return;
+
+ for (i = 0; i < lio->linfo.num_txpciq; i++) {
+ do {
+ g = (struct octnic_gather *)
+ list_delete_head(&lio->glist[i]);
+ if (g) {
+ if (g->sg) {
+ dma_unmap_single(&lio->oct_dev->
+ pci_dev->dev,
+ g->sg_dma_ptr,
+ g->sg_size,
+ DMA_TO_DEVICE);
+ kfree((void *)((unsigned long)g->sg -
+ g->adjust));
+ }
+ kfree(g);
+ }
+ } while (g);
+ }
+
+ kfree((void *)lio->glist);
}
/**
- * \brief Setup gather list
+ * \brief Setup gather lists
* @param lio per-network private data
*/
-static int setup_glist(struct lio *lio)
+static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
{
- int i;
+ int i, j;
struct octnic_gather *g;
- INIT_LIST_HEAD(&lio->glist);
+ lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock),
+ GFP_KERNEL);
+ if (!lio->glist_lock)
+ return 1;
- for (i = 0; i < lio->tx_qsize; i++) {
- g = kzalloc(sizeof(*g), GFP_KERNEL);
- if (!g)
- break;
+ lio->glist = kcalloc(num_iqs, sizeof(*lio->glist),
+ GFP_KERNEL);
+ if (!lio->glist) {
+ kfree((void *)lio->glist_lock);
+ return 1;
+ }
- g->sg_size =
- ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);
+ for (i = 0; i < num_iqs; i++) {
+ int numa_node = cpu_to_node(i % num_online_cpus());
- g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);
- if (!g->sg) {
- kfree(g);
- break;
+ spin_lock_init(&lio->glist_lock[i]);
+
+ INIT_LIST_HEAD(&lio->glist[i]);
+
+ for (j = 0; j < lio->tx_qsize; j++) {
+ g = kzalloc_node(sizeof(*g), GFP_KERNEL,
+ numa_node);
+ if (!g)
+ g = kzalloc(sizeof(*g), GFP_KERNEL);
+ if (!g)
+ break;
+
+ g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) *
+ OCT_SG_ENTRY_SIZE);
+
+ g->sg = kmalloc_node(g->sg_size + 8,
+ GFP_KERNEL, numa_node);
+ if (!g->sg)
+ g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);
+ if (!g->sg) {
+ kfree(g);
+ break;
+ }
+
+ /* The gather component should be aligned on 64-bit
+ * boundary
+ */
+ if (((unsigned long)g->sg) & 7) {
+ g->adjust = 8 - (((unsigned long)g->sg) & 7);
+ g->sg = (struct octeon_sg_entry *)
+ ((unsigned long)g->sg + g->adjust);
+ }
+ g->sg_dma_ptr = dma_map_single(&oct->pci_dev->dev,
+ g->sg, g->sg_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&oct->pci_dev->dev,
+ g->sg_dma_ptr)) {
+ kfree((void *)((unsigned long)g->sg -
+ g->adjust));
+ kfree(g);
+ break;
+ }
+
+ list_add_tail(&g->list, &lio->glist[i]);
}
- /* The gather component should be aligned on 64-bit boundary */
- if (((unsigned long)g->sg) & 7) {
- g->adjust = 8 - (((unsigned long)g->sg) & 7);
- g->sg = (struct octeon_sg_entry *)
- ((unsigned long)g->sg + g->adjust);
+ if (j != lio->tx_qsize) {
+ delete_glists(lio);
+ return 1;
}
- list_add_tail(&g->list, &lio->glist);
}
- if (i == lio->tx_qsize)
- return 0;
-
- delete_glist(lio);
- return 1;
+ return 0;
}
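
The "+ 8" over-allocation and the g->adjust bookkeeping above implement a common manual-alignment idiom: the allocator only guarantees natural alignment, so the code bumps the pointer to the next 8-byte boundary and must subtract the adjustment back before freeing. The same idiom in standalone, runnable C:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        size_t sg_size = 100;
        /* Over-allocate by 8 so there is always room to round up to the
         * next 8-byte boundary (the driver does kmalloc(g->sg_size + 8)). */
        unsigned char *raw = malloc(sg_size + 8);
        unsigned char *sg = raw;
        unsigned long adjust = 0;

        if (!raw)
                return 1;
        if ((uintptr_t)sg & 7) {
                adjust = 8 - ((uintptr_t)sg & 7);
                sg += adjust;
        }
        printf("raw=%p aligned=%p adjust=%lu\n",
               (void *)raw, (void *)sg, adjust);

        /* free() needs the original pointer back, hence the driver's
         * kfree((void *)((unsigned long)g->sg - g->adjust)). */
        free(sg - adjust);
        return 0;
}
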
/**
@@ -858,7 +925,7 @@ static void print_link_info(struct net_device *netdev)
if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) {
struct oct_link_info *linfo = &lio->linfo;
- if (linfo->link.s.status) {
+ if (linfo->link.s.link_up) {
netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
linfo->link.s.speed,
(linfo->link.s.duplex) ? "Full" : "Half");
@@ -880,13 +947,15 @@ static inline void update_link_status(struct net_device *netdev,
union oct_link_status *ls)
{
struct lio *lio = GET_LIO(netdev);
+ int changed = (lio->linfo.link.u64 != ls->u64);
- if ((lio->intf_open) && (lio->linfo.link.u64 != ls->u64)) {
- lio->linfo.link.u64 = ls->u64;
+ lio->linfo.link.u64 = ls->u64;
+ if ((lio->intf_open) && (changed)) {
print_link_info(netdev);
+ lio->link_changes++;
- if (lio->linfo.link.s.status) {
+ if (lio->linfo.link.s.link_up) {
netif_carrier_on(netdev);
/* start_txq(netdev); */
txqs_wake(netdev);
@@ -897,6 +966,42 @@ static inline void update_link_status(struct net_device *netdev,
}
}
+/* Runs in interrupt context. */
+static void update_txq_status(struct octeon_device *oct, int iq_num)
+{
+ struct net_device *netdev;
+ struct lio *lio;
+ struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
+
+ /*octeon_update_iq_read_idx(oct, iq);*/
+
+ netdev = oct->props[iq->ifidx].netdev;
+
+ /* This is needed because the first IQ does not have
+ * a netdev associated with it.
+ */
+ if (!netdev)
+ return;
+
+ lio = GET_LIO(netdev);
+ if (netif_is_multiqueue(netdev)) {
+ if (__netif_subqueue_stopped(netdev, iq->q_index) &&
+ lio->linfo.link.s.link_up &&
+ (!octnet_iq_is_full(oct, iq_num))) {
+ INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
+ tx_restart, 1);
+ netif_wake_subqueue(netdev, iq->q_index);
+ } else {
+ if (!octnet_iq_is_full(oct, lio->txq)) {
+ INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev,
+ lio->txq,
+ tx_restart, 1);
+ wake_q(netdev, lio->txq);
+ }
+ }
+ }
+}
+
/**
 * \brief Droq packet processor scheduler
* @param oct octeon device
@@ -910,8 +1015,9 @@ void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
struct octeon_droq *droq;
if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
- for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES; oq_no++) {
- if (!(oct->droq_intr & (1 << oq_no)))
+ for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
+ oq_no++) {
+ if (!(oct->droq_intr & (1ULL << oq_no)))
continue;
droq = oct->droq[oq_no];
@@ -987,7 +1093,9 @@ static int octeon_setup_interrupt(struct octeon_device *oct)
* @param pdev PCI device structure
* @param ent unused
*/
-static int liquidio_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int
+liquidio_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent __attribute__((unused)))
{
struct octeon_device *oct_dev = NULL;
struct handshake *hs;
@@ -1022,6 +1130,9 @@ static int liquidio_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
}
+ oct_dev->rx_pause = 1;
+ oct_dev->tx_pause = 1;
+
dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");
return 0;
@@ -1087,19 +1198,13 @@ static void octeon_destroy_resources(struct octeon_device *oct)
if (oct->flags & LIO_FLAG_MSI_ENABLED)
pci_disable_msi(oct->pci_dev);
- /* Soft reset the octeon device before exiting */
- oct->fn_list.soft_reset(oct);
-
- /* Disable the device, releasing the PCI INT */
- pci_disable_device(oct->pci_dev);
-
/* fallthrough */
case OCT_DEV_IN_RESET:
case OCT_DEV_DROQ_INIT_DONE:
/*atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);*/
mdelay(100);
- for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
- if (!(oct->io_qmask.oq & (1UL << i)))
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.oq & (1ULL << i)))
continue;
octeon_delete_droq(oct, i);
}
@@ -1126,8 +1231,8 @@ static void octeon_destroy_resources(struct octeon_device *oct)
/* fallthrough */
case OCT_DEV_INSTR_QUEUE_INIT_DONE:
- for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
- if (!(oct->io_qmask.iq & (1UL << i)))
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.iq & (1ULL << i)))
continue;
octeon_delete_instr_queue(oct, i);
}
@@ -1139,14 +1244,21 @@ static void octeon_destroy_resources(struct octeon_device *oct)
/* fallthrough */
case OCT_DEV_PCI_MAP_DONE:
+
+ /* Soft reset the octeon device before exiting */
+ oct->fn_list.soft_reset(oct);
+
octeon_unmap_pci_barx(oct, 0);
octeon_unmap_pci_barx(oct, 1);
/* fallthrough */
case OCT_DEV_BEGIN_STATE:
+ /* Disable the device, releasing the PCI INT */
+ pci_disable_device(oct->pci_dev);
+
/* Nothing to be done here either */
break;
- } /* end switch(oct->status) */
+ } /* end switch (oct->status) */
tasklet_kill(&oct_priv->droq_tasklet);
}
@@ -1159,18 +1271,15 @@ static void octeon_destroy_resources(struct octeon_device *oct)
static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
struct octnic_ctrl_pkt nctrl;
- struct octnic_ctrl_params nparams;
memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
nctrl.ncmd.s.cmd = OCTNET_CMD_RX_CTL;
- nctrl.ncmd.s.param1 = lio->linfo.ifidx;
- nctrl.ncmd.s.param2 = start_stop;
+ nctrl.ncmd.s.param1 = start_stop;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
nctrl.netpndev = (u64)lio->netdev;
- nparams.resp_order = OCTEON_RESP_NORESPONSE;
-
- if (octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams) < 0)
+ if (octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl) < 0)
netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
}
@@ -1186,6 +1295,7 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
{
struct net_device *netdev = oct->props[ifidx].netdev;
struct lio *lio;
+ struct napi_struct *napi, *n;
if (!netdev) {
dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
@@ -1202,13 +1312,22 @@ static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
txqs_stop(netdev);
+ if (oct->props[lio->ifidx].napi_enabled == 1) {
+ list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
+ napi_disable(napi);
+
+ oct->props[lio->ifidx].napi_enabled = 0;
+ }
+
if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
unregister_netdev(netdev);
- delete_glist(lio);
+ delete_glists(lio);
free_netdev(netdev);
+ oct->props[ifidx].gmxport = -1;
+
oct->props[ifidx].netdev = NULL;
}
@@ -1227,10 +1346,15 @@ static int liquidio_stop_nic_module(struct octeon_device *oct)
return 1;
}
+ spin_lock_bh(&oct->cmd_resp_wqlock);
+ oct->cmd_resp_state = OCT_DRV_OFFLINE;
+ spin_unlock_bh(&oct->cmd_resp_wqlock);
+
for (i = 0; i < oct->ifcount; i++) {
lio = GET_LIO(oct->props[i].netdev);
for (j = 0; j < lio->linfo.num_rxpciq; j++)
- octeon_unregister_droq_ops(oct, lio->linfo.rxpciq[j]);
+ octeon_unregister_droq_ops(oct,
+ lio->linfo.rxpciq[j].s.q_no);
}
for (i = 0; i < oct->ifcount; i++)
@@ -1274,6 +1398,7 @@ static int octeon_chip_specific_setup(struct octeon_device *oct)
{
u32 dev_id, rev_id;
int ret = 1;
+ char *s;
pci_read_config_dword(oct->pci_dev, 0, &dev_id);
pci_read_config_dword(oct->pci_dev, 8, &rev_id);
@@ -1283,22 +1408,27 @@ static int octeon_chip_specific_setup(struct octeon_device *oct)
case OCTEON_CN68XX_PCIID:
oct->chip_id = OCTEON_CN68XX;
ret = lio_setup_cn68xx_octeon_device(oct);
+ s = "CN68XX";
break;
case OCTEON_CN66XX_PCIID:
oct->chip_id = OCTEON_CN66XX;
ret = lio_setup_cn66xx_octeon_device(oct);
+ s = "CN66XX";
break;
+
default:
+ s = "?";
dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
dev_id);
}
if (!ret)
- dev_info(&oct->pci_dev->dev, "CN68XX PASS%d.%d %s\n",
+ dev_info(&oct->pci_dev->dev, "%s PASS%d.%d %s Version: %s\n", s,
OCTEON_MAJOR_REV(oct),
OCTEON_MINOR_REV(oct),
- octeon_get_conf(oct)->card_name);
+ octeon_get_conf(oct)->card_name,
+ LIQUIDIO_VERSION);
return ret;
}
@@ -1326,6 +1456,16 @@ static int octeon_pci_os_setup(struct octeon_device *oct)
return 0;
}
+static inline int skb_iq(struct lio *lio, struct sk_buff *skb)
+{
+ int q = 0;
+
+ if (netif_is_multiqueue(lio->netdev))
+ q = skb->queue_mapping % lio->linfo.num_txpciq;
+
+ return q;
+}
+
/**
* \brief Check Tx queue state for a given network buffer
* @param lio per-network private data
@@ -1337,14 +1477,19 @@ static inline int check_txq_state(struct lio *lio, struct sk_buff *skb)
if (netif_is_multiqueue(lio->netdev)) {
q = skb->queue_mapping;
- iq = lio->linfo.txpciq[(q & (lio->linfo.num_txpciq - 1))];
+ iq = lio->linfo.txpciq[(q % (lio->linfo.num_txpciq))].s.q_no;
} else {
iq = lio->txq;
+ q = iq;
}
if (octnet_iq_is_full(lio->oct_dev, iq))
return 0;
- wake_q(lio->netdev, q);
+
+ if (__netif_subqueue_stopped(lio->netdev, q)) {
+ INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1);
+ wake_q(lio->netdev, q);
+ }
return 1;
}
@@ -1367,7 +1512,7 @@ static void free_netbuf(void *buf)
check_txq_state(lio, skb);
- recv_buffer_free((struct sk_buff *)skb);
+ tx_buffer_free(skb);
}
/**
@@ -1380,7 +1525,7 @@ static void free_netsgbuf(void *buf)
struct sk_buff *skb;
struct lio *lio;
struct octnic_gather *g;
- int i, frags;
+ int i, frags, iq;
finfo = (struct octnet_buf_free_info *)buf;
skb = finfo->skb;
@@ -1402,17 +1547,17 @@ static void free_netsgbuf(void *buf)
i++;
}
- dma_unmap_single(&lio->oct_dev->pci_dev->dev,
- finfo->dptr, g->sg_size,
- DMA_TO_DEVICE);
+ dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
+ g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);
- spin_lock(&lio->lock);
- list_add_tail(&g->list, &lio->glist);
- spin_unlock(&lio->lock);
+ iq = skb_iq(lio, skb);
+ spin_lock(&lio->glist_lock[iq]);
+ list_add_tail(&g->list, &lio->glist[iq]);
+ spin_unlock(&lio->glist_lock[iq]);
check_txq_state(lio, skb); /* mq support: sub-queue state check */
- recv_buffer_free((struct sk_buff *)skb);
+ tx_buffer_free(skb);
}
/**
@@ -1426,7 +1571,7 @@ static void free_netsgbuf_with_resp(void *buf)
struct sk_buff *skb;
struct lio *lio;
struct octnic_gather *g;
- int i, frags;
+ int i, frags, iq;
sc = (struct octeon_soft_command *)buf;
skb = (struct sk_buff *)sc->callback_arg;
@@ -1450,13 +1595,14 @@ static void free_netsgbuf_with_resp(void *buf)
i++;
}
- dma_unmap_single(&lio->oct_dev->pci_dev->dev,
- finfo->dptr, g->sg_size,
- DMA_TO_DEVICE);
+ dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
+ g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);
+
+ iq = skb_iq(lio, skb);
- spin_lock(&lio->lock);
- list_add_tail(&g->list, &lio->glist);
- spin_unlock(&lio->lock);
+ spin_lock(&lio->glist_lock[iq]);
+ list_add_tail(&g->list, &lio->glist[iq]);
+ spin_unlock(&lio->glist_lock[iq]);
/* Don't free the skb yet */
@@ -1569,8 +1715,10 @@ static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
* @param rq request
* @param on is it on
*/
-static int liquidio_ptp_enable(struct ptp_clock_info *ptp,
- struct ptp_clock_request *rq, int on)
+static int
+liquidio_ptp_enable(struct ptp_clock_info *ptp __attribute__((unused)),
+ struct ptp_clock_request *rq __attribute__((unused)),
+ int on __attribute__((unused)))
{
return -EOPNOTSUPP;
}
@@ -1657,6 +1805,7 @@ static int load_firmware(struct octeon_device *oct)
if (ret) {
dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n.",
fw_name);
+ release_firmware(fw);
return ret;
}
@@ -1710,7 +1859,7 @@ static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
* @param buf pointer to resp structure
*/
static void if_cfg_callback(struct octeon_device *oct,
- u32 status,
+ u32 status __attribute__((unused)),
void *buf)
{
struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
@@ -1724,7 +1873,10 @@ static void if_cfg_callback(struct octeon_device *oct,
if (resp->status)
dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
CVM_CAST64(resp->status));
- ACCESS_ONCE(ctx->cond) = 1;
+ WRITE_ONCE(ctx->cond, 1);
+
+ snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
+ resp->cfg_info.liquidio_firmware_version);
/* This barrier is required to be sure that the response has been
* written fully before waking up the handler
@@ -1741,16 +1893,16 @@ static void if_cfg_callback(struct octeon_device *oct,
* @returns selected queue number
*/
static u16 select_q(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
+ void *accel_priv __attribute__((unused)),
+ select_queue_fallback_t fallback __attribute__((unused)))
{
- int qindex;
+ u32 qindex = 0;
struct lio *lio;
lio = GET_LIO(dev);
- /* select queue on chosen queue_mapping or core */
- qindex = skb_rx_queue_recorded(skb) ?
- skb_get_rx_queue(skb) : smp_processor_id();
- return (u16)(qindex & (lio->linfo.num_txpciq - 1));
+ qindex = skb_tx_hash(dev, skb);
+
+ return (u16)(qindex % (lio->linfo.num_txpciq));
}
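
The switch from "& (num_txpciq - 1)" to "% num_txpciq" in this hunk matters because the mask form only works when the queue count is a power of two; otherwise it aliases some queues and never selects others. A standalone demonstration:

#include <stdio.h>

int main(void)
{
        unsigned int num_txpciq = 6; /* queue count, not a power of two */
        unsigned int hash;

        for (hash = 0; hash < 8; hash++)
                printf("hash %u -> mask:%u mod:%u\n", hash,
                       hash & (num_txpciq - 1), /* old: wrong unless 2^n */
                       hash % num_txpciq);      /* new: always in range */
        return 0;
}

With six queues the mask form never selects queues 2 and 3 and double-loads 0, 1, 4 and 5, while the modulo form spreads traffic across all six.
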
/** Routine to push packets arriving on Octeon interface up to the network layer.
@@ -1759,26 +1911,28 @@ static u16 select_q(struct net_device *dev, struct sk_buff *skb,
* @param len - size of total data received.
* @param rh - Control header associated with the packet
* @param param - additional control data with the packet
+ * @param arg - farg registered in droq_ops
*/
static void
-liquidio_push_packet(u32 octeon_id,
+liquidio_push_packet(u32 octeon_id __attribute__((unused)),
void *skbuff,
u32 len,
union octeon_rh *rh,
- void *param)
+ void *param,
+ void *arg)
{
struct napi_struct *napi = param;
- struct octeon_device *oct = lio_get_device(octeon_id);
struct sk_buff *skb = (struct sk_buff *)skbuff;
struct skb_shared_hwtstamps *shhwtstamps;
u64 ns;
- struct net_device *netdev =
- (struct net_device *)oct->props[rh->r_dh.link].netdev;
+ u16 vtag = 0;
+ struct net_device *netdev = (struct net_device *)arg;
struct octeon_droq *droq = container_of(param, struct octeon_droq,
napi);
if (netdev) {
int packet_was_received;
struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
/* Do not proceed if the interface is not in RUNNING state. */
if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
@@ -1789,32 +1943,86 @@ liquidio_push_packet(u32 octeon_id,
skb->dev = netdev;
- if (rh->r_dh.has_hwtstamp) {
- /* timestamp is included from the hardware at the
- * beginning of the packet.
- */
- if (ifstate_check(lio,
- LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
- /* Nanoseconds are in the first 64-bits
- * of the packet.
+ skb_record_rx_queue(skb, droq->q_no);
+ if (likely(len > MIN_SKB_SIZE)) {
+ struct octeon_skb_page_info *pg_info;
+ unsigned char *va;
+
+ pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+ if (pg_info->page) {
+ /* For Paged allocation use the frags */
+ va = page_address(pg_info->page) +
+ pg_info->page_offset;
+ memcpy(skb->data, va, MIN_SKB_SIZE);
+ skb_put(skb, MIN_SKB_SIZE);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ pg_info->page,
+ pg_info->page_offset +
+ MIN_SKB_SIZE,
+ len - MIN_SKB_SIZE,
+ LIO_RXBUFFER_SZ);
+ }
+ } else {
+ struct octeon_skb_page_info *pg_info =
+ ((struct octeon_skb_page_info *)(skb->cb));
+ skb_copy_to_linear_data(skb, page_address(pg_info->page)
+ + pg_info->page_offset, len);
+ skb_put(skb, len);
+ put_page(pg_info->page);
+ }
+
+ if (((oct->chip_id == OCTEON_CN66XX) ||
+ (oct->chip_id == OCTEON_CN68XX)) &&
+ ptp_enable) {
+ if (rh->r_dh.has_hwtstamp) {
+ /* timestamp is included from the hardware at
+ * the beginning of the packet.
*/
- memcpy(&ns, (skb->data), sizeof(ns));
- shhwtstamps = skb_hwtstamps(skb);
- shhwtstamps->hwtstamp =
- ns_to_ktime(ns + lio->ptp_adjust);
+ if (ifstate_check
+ (lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
+ /* Nanoseconds are in the first 64-bits
+ * of the packet.
+ */
+ memcpy(&ns, (skb->data), sizeof(ns));
+ shhwtstamps = skb_hwtstamps(skb);
+ shhwtstamps->hwtstamp =
+ ns_to_ktime(ns +
+ lio->ptp_adjust);
+ }
+ skb_pull(skb, sizeof(ns));
}
- skb_pull(skb, sizeof(ns));
}
skb->protocol = eth_type_trans(skb, skb->dev);
-
if ((netdev->features & NETIF_F_RXCSUM) &&
- (rh->r_dh.csum_verified == CNNIC_CSUM_VERIFIED))
+ (((rh->r_dh.encap_on) &&
+ (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
+ (!(rh->r_dh.encap_on) &&
+ (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))))
/* checksum has already been verified */
skb->ip_summed = CHECKSUM_UNNECESSARY;
else
skb->ip_summed = CHECKSUM_NONE;
+ /* Set the encapsulation field based on the status received
+ * from the firmware
+ */
+ if (rh->r_dh.encap_on) {
+ skb->encapsulation = 1;
+ skb->csum_level = 1;
+ droq->stats.rx_vxlan++;
+ }
+
+ /* inbound VLAN tag */
+ if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (rh->r_dh.vlan != 0)) {
+ u16 vid = rh->r_dh.vlan;
+ u16 priority = rh->r_dh.priority;
+
+ vtag = priority << 13 | vid;
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
+ }
+
packet_was_received = napi_gro_receive(napi, skb) != GRO_DROP;
if (packet_was_received) {
@@ -1869,39 +2077,6 @@ static void liquidio_napi_drv_callback(void *arg)
}
/**
- * \brief Main NAPI poll function
- * @param droq octeon output queue
- * @param budget maximum number of items to process
- */
-static int liquidio_napi_do_rx(struct octeon_droq *droq, int budget)
-{
- int work_done;
- struct lio *lio = GET_LIO(droq->napi.dev);
- struct octeon_device *oct = lio->oct_dev;
-
- work_done = octeon_process_droq_poll_cmd(oct, droq->q_no,
- POLL_EVENT_PROCESS_PKTS,
- budget);
- if (work_done < 0) {
- netif_info(lio, rx_err, lio->netdev,
- "Receive work_done < 0, rxq:%d\n", droq->q_no);
- goto octnet_napi_finish;
- }
-
- if (work_done > budget)
- dev_err(&oct->pci_dev->dev, ">>>> %s work_done: %d budget: %d\n",
- __func__, work_done, budget);
-
- return work_done;
-
-octnet_napi_finish:
- napi_complete(&droq->napi);
- octeon_process_droq_poll_cmd(oct, droq->q_no, POLL_EVENT_ENABLE_INTR,
- 0);
- return 0;
-}
-
-/**
* \brief Entry point for NAPI polling
* @param napi NAPI structure
* @param budget maximum number of items to process
@@ -1910,35 +2085,57 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
{
struct octeon_droq *droq;
int work_done;
+ int tx_done = 0, iq_no;
+ struct octeon_instr_queue *iq;
+ struct octeon_device *oct;
droq = container_of(napi, struct octeon_droq, napi);
+ oct = droq->oct_dev;
+ iq_no = droq->q_no;
+ /* Handle Droq descriptors */
+ work_done = octeon_process_droq_poll_cmd(oct, droq->q_no,
+ POLL_EVENT_PROCESS_PKTS,
+ budget);
- work_done = liquidio_napi_do_rx(droq, budget);
+ /* Flush the instruction queue */
+ iq = oct->instr_queue[iq_no];
+ if (iq) {
+ /* Process iq buffers with in the budget limits */
+ tx_done = octeon_flush_iq(oct, iq, 1, budget);
+ /* Update iq read-index rather than waiting for next interrupt.
+ * Return back if tx_done is false.
+ */
+ update_txq_status(oct, iq_no);
+ /*tx_done = (iq->flush_index == iq->octeon_read_index);*/
+ } else {
+ dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n",
+ __func__, iq_no);
+ }
- if (work_done < budget) {
+ if ((work_done < budget) && (tx_done)) {
napi_complete(napi);
octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
POLL_EVENT_ENABLE_INTR, 0);
return 0;
}
- return work_done;
+ return (!tx_done) ? (budget) : (work_done);
}
/**
* \brief Setup input and output queues
* @param octeon_dev octeon device
- * @param net_device Net device
+ * @param ifidx Interface Index
*
* Note: Queues are with respect to the octeon device. Thus
* an input queue is for egress packets, and output queues
* are for ingress packets.
*/
static inline int setup_io_queues(struct octeon_device *octeon_dev,
- struct net_device *net_device)
+ int ifidx)
{
- static int first_time = 1;
- static struct octeon_droq_ops droq_ops;
+ struct octeon_droq_ops droq_ops;
+ struct net_device *netdev;
static int cpu_id;
static int cpu_id_modulus;
struct octeon_droq *droq;
@@ -1947,23 +2144,26 @@ static inline int setup_io_queues(struct octeon_device *octeon_dev,
struct lio *lio;
int num_tx_descs;
- lio = GET_LIO(net_device);
- if (first_time) {
- first_time = 0;
- memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));
+ netdev = octeon_dev->props[ifidx].netdev;
- droq_ops.fptr = liquidio_push_packet;
+ lio = GET_LIO(netdev);
- droq_ops.poll_mode = 1;
- droq_ops.napi_fn = liquidio_napi_drv_callback;
- cpu_id = 0;
- cpu_id_modulus = num_present_cpus();
- }
+ memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));
+
+ droq_ops.fptr = liquidio_push_packet;
+ droq_ops.farg = (void *)netdev;
+
+ droq_ops.poll_mode = 1;
+ droq_ops.napi_fn = liquidio_napi_drv_callback;
+ cpu_id = 0;
+ cpu_id_modulus = num_present_cpus();
/* set up DROQs. */
for (q = 0; q < lio->linfo.num_rxpciq; q++) {
- q_no = lio->linfo.rxpciq[q];
-
+ q_no = lio->linfo.rxpciq[q].s.q_no;
+ dev_dbg(&octeon_dev->pci_dev->dev,
+ "setup_io_queues index:%d linfo.rxpciq.s.q_no:%d\n",
+ q, q_no);
retval = octeon_setup_droq(octeon_dev, q_no,
CFG_GET_NUM_RX_DESCS_NIC_IF
(octeon_get_conf(octeon_dev),
@@ -1980,7 +2180,11 @@ static inline int setup_io_queues(struct octeon_device *octeon_dev,
droq = octeon_dev->droq[q_no];
napi = &droq->napi;
- netif_napi_add(net_device, napi, liquidio_napi_poll, 64);
+ dev_dbg(&octeon_dev->pci_dev->dev,
+ "netif_napi_add netdev:%llx oct:%llx\n",
+ (u64)netdev,
+ (u64)octeon_dev);
+ netif_napi_add(netdev, napi, liquidio_napi_poll, 64);
/* designate a CPU for this droq */
droq->cpu_id = cpu_id;
@@ -1996,9 +2200,9 @@ static inline int setup_io_queues(struct octeon_device *octeon_dev,
num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(octeon_get_conf
(octeon_dev),
lio->ifidx);
- retval = octeon_setup_iq(octeon_dev, lio->linfo.txpciq[q],
- num_tx_descs,
- netdev_get_tx_queue(net_device, q));
+ retval = octeon_setup_iq(octeon_dev, ifidx, q,
+ lio->linfo.txpciq[q], num_tx_descs,
+ netdev_get_tx_queue(netdev, q));
if (retval) {
dev_err(&octeon_dev->pci_dev->dev,
" %s : Runtime IQ(TxQ) creation failed.\n",
@@ -2036,7 +2240,8 @@ static inline void setup_tx_poll_fn(struct net_device *netdev)
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
- lio->txq_status_wq.wq = create_workqueue("txq-status");
+ lio->txq_status_wq.wq = alloc_workqueue("txq-status",
+ WQ_MEM_RECLAIM, 0);
if (!lio->txq_status_wq.wq) {
dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
return;
@@ -2048,6 +2253,14 @@ static inline void setup_tx_poll_fn(struct net_device *netdev)
&lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
}
+static inline void cleanup_tx_poll_fn(struct net_device *netdev)
+{
+ struct lio *lio = GET_LIO(netdev);
+
+ cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
+ destroy_workqueue(lio->txq_status_wq.wq);
+}
+
/**
* \brief Net device open for LiquidIO
* @param netdev network device
@@ -2058,17 +2271,22 @@ static int liquidio_open(struct net_device *netdev)
struct octeon_device *oct = lio->oct_dev;
struct napi_struct *napi, *n;
- list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
- napi_enable(napi);
+ if (oct->props[lio->ifidx].napi_enabled == 0) {
+ list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
+ napi_enable(napi);
+
+ oct->props[lio->ifidx].napi_enabled = 1;
+ }
oct_ptp_open(netdev);
ifstate_set(lio, LIO_IFSTATE_RUNNING);
+
setup_tx_poll_fn(netdev);
+
start_txq(netdev);
netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
- try_module_get(THIS_MODULE);
/* tell Octeon to start forwarding packets to host */
send_rx_ctrl_cmd(lio, 1);
@@ -2088,41 +2306,36 @@ static int liquidio_open(struct net_device *netdev)
*/
static int liquidio_stop(struct net_device *netdev)
{
- struct napi_struct *napi, *n;
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
- netif_info(lio, ifdown, lio->netdev, "Stopping interface!\n");
+ ifstate_reset(lio, LIO_IFSTATE_RUNNING);
+
+ netif_tx_disable(netdev);
+
/* Inform that netif carrier is down */
+ netif_carrier_off(netdev);
lio->intf_open = 0;
- lio->linfo.link.s.status = 0;
+ lio->linfo.link.s.link_up = 0;
+ lio->link_changes++;
- netif_carrier_off(netdev);
+ /* Pause for a moment and wait for Octeon to flush out (to the wire) any
+ * egress packets that are in-flight.
+ */
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(msecs_to_jiffies(100));
- /* tell Octeon to stop forwarding packets to host */
+ /* Now it should be safe to tell Octeon that the NIC interface is down. */
send_rx_ctrl_cmd(lio, 0);
- cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
- flush_workqueue(lio->txq_status_wq.wq);
- destroy_workqueue(lio->txq_status_wq.wq);
+ cleanup_tx_poll_fn(netdev);
if (lio->ptp_clock) {
ptp_clock_unregister(lio->ptp_clock);
lio->ptp_clock = NULL;
}
- ifstate_reset(lio, LIO_IFSTATE_RUNNING);
-
- /* This is a hack that allows DHCP to continue working. */
- set_bit(__LINK_STATE_START, &lio->netdev->state);
-
- list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
- napi_disable(napi);
-
- txqs_stop(netdev);
-
dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
- module_put(THIS_MODULE);
return 0;
}
@@ -2133,6 +2346,7 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
struct net_device *netdev = (struct net_device *)nctrl->netpndev;
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
+ u8 *mac;
switch (nctrl->ncmd.s.cmd) {
case OCTNET_CMD_CHANGE_DEVFLAGS:
@@ -2140,22 +2354,24 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
break;
case OCTNET_CMD_CHANGE_MACADDR:
- /* If command is successful, change the MACADDR. */
- netif_info(lio, probe, lio->netdev, " MACAddr changed to 0x%llx\n",
- CVM_CAST64(nctrl->udd[0]));
- dev_info(&oct->pci_dev->dev, "%s MACAddr changed to 0x%llx\n",
- netdev->name, CVM_CAST64(nctrl->udd[0]));
- memcpy(netdev->dev_addr, ((u8 *)&nctrl->udd[0]) + 2, ETH_ALEN);
+ mac = ((u8 *)&nctrl->udd[0]) + 2;
+ netif_info(lio, probe, lio->netdev,
+ "%s %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
+ "MACAddr changed to", mac[0], mac[1],
+ mac[2], mac[3], mac[4], mac[5]);
break;
case OCTNET_CMD_CHANGE_MTU:
/* If command is successful, change the MTU. */
netif_info(lio, probe, lio->netdev, " MTU Changed from %d to %d\n",
- netdev->mtu, nctrl->ncmd.s.param2);
+ netdev->mtu, nctrl->ncmd.s.param1);
dev_info(&oct->pci_dev->dev, "%s MTU Changed from %d to %d\n",
netdev->name, netdev->mtu,
- nctrl->ncmd.s.param2);
- netdev->mtu = nctrl->ncmd.s.param2;
+ nctrl->ncmd.s.param1);
+ rtnl_lock();
+ netdev->mtu = nctrl->ncmd.s.param1;
+ call_netdevice_notifiers(NETDEV_CHANGEMTU, netdev);
+ rtnl_unlock();
break;
case OCTNET_CMD_GPIO_ACCESS:
@@ -2181,11 +2397,79 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
netdev->name);
break;
+ case OCTNET_CMD_ENABLE_VLAN_FILTER:
+ dev_info(&oct->pci_dev->dev, "%s VLAN filter enabled\n",
+ netdev->name);
+ break;
+
+ case OCTNET_CMD_ADD_VLAN_FILTER:
+ dev_info(&oct->pci_dev->dev, "%s VLAN filter %d added\n",
+ netdev->name, nctrl->ncmd.s.param1);
+ break;
+
+ case OCTNET_CMD_DEL_VLAN_FILTER:
+ dev_info(&oct->pci_dev->dev, "%s VLAN filter %d removed\n",
+ netdev->name, nctrl->ncmd.s.param1);
+ break;
+
case OCTNET_CMD_SET_SETTINGS:
dev_info(&oct->pci_dev->dev, "%s settings changed\n",
netdev->name);
break;
+ /* Case to handle "OCTNET_CMD_TNL_RX_CSUM_CTL"
+ * Command passed by NIC driver
+ */
+ case OCTNET_CMD_TNL_RX_CSUM_CTL:
+ if (nctrl->ncmd.s.param1 == OCTNET_CMD_RXCSUM_ENABLE) {
+ netif_info(lio, probe, lio->netdev,
+ "%s RX Checksum Offload Enabled\n",
+ netdev->name);
+ } else if (nctrl->ncmd.s.param1 ==
+ OCTNET_CMD_RXCSUM_DISABLE) {
+ netif_info(lio, probe, lio->netdev,
+ "%s RX Checksum Offload Disabled\n",
+ netdev->name);
+ }
+ break;
+
+ /* Case to handle "OCTNET_CMD_TNL_TX_CSUM_CTL"
+ * Command passed by NIC driver
+ */
+ case OCTNET_CMD_TNL_TX_CSUM_CTL:
+ if (nctrl->ncmd.s.param1 == OCTNET_CMD_TXCSUM_ENABLE) {
+ netif_info(lio, probe, lio->netdev,
+ "%s TX Checksum Offload Enabled\n",
+ netdev->name);
+ } else if (nctrl->ncmd.s.param1 ==
+ OCTNET_CMD_TXCSUM_DISABLE) {
+ netif_info(lio, probe, lio->netdev,
+ "%s TX Checksum Offload Disabled\n",
+ netdev->name);
+ }
+ break;
+
+ /* Case to handle "OCTNET_CMD_VXLAN_PORT_CONFIG"
+ * Command passed by NIC driver
+ */
+ case OCTNET_CMD_VXLAN_PORT_CONFIG:
+ if (nctrl->ncmd.s.more == OCTNET_CMD_VXLAN_PORT_ADD) {
+ netif_info(lio, probe, lio->netdev,
+ "%s VxLAN Destination UDP PORT:%d ADDED\n",
+ netdev->name,
+ nctrl->ncmd.s.param1);
+ } else if (nctrl->ncmd.s.more ==
+ OCTNET_CMD_VXLAN_PORT_DEL) {
+ netif_info(lio, probe, lio->netdev,
+ "%s VxLAN Destination UDP PORT:%d DELETED\n",
+ netdev->name,
+ nctrl->ncmd.s.param1);
+ }
+ break;
+
+ case OCTNET_CMD_SET_FLOW_CTL:
+ netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n");
+ break;
default:
dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
@@ -2235,10 +2519,9 @@ static void liquidio_set_mcast_list(struct net_device *netdev)
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
struct octnic_ctrl_pkt nctrl;
- struct octnic_ctrl_params nparams;
struct netdev_hw_addr *ha;
u64 *mc;
- int ret, i;
+ int ret;
int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
@@ -2246,15 +2529,14 @@ static void liquidio_set_mcast_list(struct net_device *netdev)
/* Create a ctrl pkt command to be sent to core app. */
nctrl.ncmd.u64 = 0;
nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
- nctrl.ncmd.s.param1 = lio->linfo.ifidx;
- nctrl.ncmd.s.param2 = get_new_flags(netdev);
- nctrl.ncmd.s.param3 = mc_count;
+ nctrl.ncmd.s.param1 = get_new_flags(netdev);
+ nctrl.ncmd.s.param2 = mc_count;
nctrl.ncmd.s.more = mc_count;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
nctrl.netpndev = (u64)netdev;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
/* copy all the addresses into the udd */
- i = 0;
mc = &nctrl.udd[0];
netdev_for_each_mc_addr(ha, netdev) {
*mc = 0;
@@ -2270,9 +2552,7 @@ static void liquidio_set_mcast_list(struct net_device *netdev)
*/
nctrl.wait_time = 0;
- nparams.resp_order = OCTEON_RESP_NORESPONSE;
-
- ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
if (ret < 0) {
dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
ret);
@@ -2290,19 +2570,17 @@ static int liquidio_set_mac(struct net_device *netdev, void *p)
struct octeon_device *oct = lio->oct_dev;
struct sockaddr *addr = (struct sockaddr *)p;
struct octnic_ctrl_pkt nctrl;
- struct octnic_ctrl_params nparams;
- if ((!is_valid_ether_addr(addr->sa_data)) ||
- (ifstate_check(lio, LIO_IFSTATE_RUNNING)))
+ if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
nctrl.ncmd.u64 = 0;
nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
- nctrl.ncmd.s.param1 = lio->linfo.ifidx;
- nctrl.ncmd.s.param2 = 0;
+ nctrl.ncmd.s.param1 = 0;
nctrl.ncmd.s.more = 1;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
nctrl.netpndev = (u64)netdev;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
nctrl.wait_time = 100;
@@ -2311,9 +2589,7 @@ static int liquidio_set_mac(struct net_device *netdev, void *p)
/* The MAC Address is presented in network byte order. */
memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);
- nparams.resp_order = OCTEON_RESP_ORDERED;
-
- ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
if (ret < 0) {
dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
return -ENOMEM;
@@ -2341,7 +2617,7 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
oct = lio->oct_dev;
for (i = 0; i < lio->linfo.num_txpciq; i++) {
- iq_no = lio->linfo.txpciq[i];
+ iq_no = lio->linfo.txpciq[i].s.q_no;
iq_stats = &oct->instr_queue[iq_no]->stats;
pkts += iq_stats->tx_done;
drop += iq_stats->tx_dropped;
@@ -2357,7 +2633,7 @@ static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
bytes = 0;
for (i = 0; i < lio->linfo.num_rxpciq; i++) {
- oq_no = lio->linfo.rxpciq[i];
+ oq_no = lio->linfo.rxpciq[i].s.q_no;
oq_stats = &oct->droq[oq_no]->stats;
pkts += oq_stats->rx_pkts_received;
drop += (oq_stats->rx_dropped +
@@ -2383,19 +2659,16 @@ static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
struct octnic_ctrl_pkt nctrl;
- struct octnic_ctrl_params nparams;
- int max_frm_size = new_mtu + OCTNET_FRM_HEADER_SIZE;
int ret = 0;
- /* Limit the MTU to make sure the ethernet packets are between 64 bytes
- * and 65535 bytes
+ /* Limit the MTU to make sure the ethernet packets are between 68 bytes
+ * and 16000 bytes
*/
- if ((max_frm_size < OCTNET_MIN_FRM_SIZE) ||
- (max_frm_size > OCTNET_MAX_FRM_SIZE)) {
+ if ((new_mtu < LIO_MIN_MTU_SIZE) ||
+ (new_mtu > LIO_MAX_MTU_SIZE)) {
dev_err(&oct->pci_dev->dev, "Invalid MTU: %d\n", new_mtu);
dev_err(&oct->pci_dev->dev, "Valid range %d and %d\n",
- (OCTNET_MIN_FRM_SIZE - OCTNET_FRM_HEADER_SIZE),
- (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE));
+ LIO_MIN_MTU_SIZE, LIO_MAX_MTU_SIZE);
return -EINVAL;
}
@@ -2403,15 +2676,13 @@ static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
nctrl.ncmd.u64 = 0;
nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MTU;
- nctrl.ncmd.s.param1 = lio->linfo.ifidx;
- nctrl.ncmd.s.param2 = new_mtu;
+ nctrl.ncmd.s.param1 = new_mtu;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
nctrl.wait_time = 100;
nctrl.netpndev = (u64)netdev;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
- nparams.resp_order = OCTEON_RESP_ORDERED;
-
- ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
if (ret < 0) {
dev_err(&oct->pci_dev->dev, "Failed to set MTU\n");
return -1;
@@ -2428,7 +2699,7 @@ static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
* @param ifr interface request
- * @param cmd command
*/
-static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
{
struct hwtstamp_config conf;
struct lio *lio = GET_LIO(netdev);
@@ -2489,7 +2760,7 @@ static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
case SIOCSHWTSTAMP:
- return hwtstamp_ioctl(netdev, ifr, cmd);
+ return hwtstamp_ioctl(netdev, ifr);
default:
return -EOPNOTSUPP;
}
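
For reference, SIOCSHWTSTAMP reaches hwtstamp_ioctl() from user space via a struct hwtstamp_config carried inside an ifreq. A hedged user-space sketch (socket and interface name "eth0" are illustrative; the driver above may reject some filter modes):

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

/* Request hardware RX timestamps on an interface. */
static int enable_rx_hwtstamp(int sock)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_OFF,
		.rx_filter = HWTSTAMP_FILTER_ALL,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	return ioctl(sock, SIOCSHWTSTAMP, &ifr);
}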
@@ -2536,7 +2807,7 @@ static void handle_timestamp(struct octeon_device *oct,
}
octeon_free_soft_command(oct, sc);
- recv_buffer_free(skb);
+ tx_buffer_free(skb);
}
/** \brief Send a data packet that will be timestamped
@@ -2551,10 +2822,9 @@ static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
{
int retval;
struct octeon_soft_command *sc;
- struct octeon_instr_ih *ih;
- struct octeon_instr_rdp *rdp;
struct lio *lio;
int ring_doorbell;
+ u32 len;
lio = finfo->lio;
@@ -2576,14 +2846,13 @@ static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
sc->callback_arg = finfo->skb;
sc->iq_no = ndata->q_no;
- ih = (struct octeon_instr_ih *)&sc->cmd.ih;
- rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
+ len = (u32)((struct octeon_instr_ih2 *)(&sc->cmd.cmd2.ih2))->dlengsz;
ring_doorbell = !xmit_more;
retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
- sc, ih->dlengsz, ndata->reqtype);
+ sc, len, ndata->reqtype);
- if (retval) {
+ if (retval == IQ_SEND_FAILED) {
dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
retval);
octeon_free_soft_command(oct, sc);
@@ -2594,68 +2863,6 @@ static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
return retval;
}
-static inline int is_ipv4(struct sk_buff *skb)
-{
- return (skb->protocol == htons(ETH_P_IP)) &&
- (ip_hdr(skb)->version == 4);
-}
-
-static inline int is_vlan(struct sk_buff *skb)
-{
- return skb->protocol == htons(ETH_P_8021Q);
-}
-
-static inline int is_ip_fragmented(struct sk_buff *skb)
-{
- /* The Don't fragment and Reserved flag fields are ignored.
- * IP is fragmented if
- * - the More fragments bit is set (indicating this IP is a fragment
- * with more to follow; the current offset could be 0 ).
- * - ths offset field is non-zero.
- */
- return (ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) ? 1 : 0;
-}
-
-static inline int is_ipv6(struct sk_buff *skb)
-{
- return (skb->protocol == htons(ETH_P_IPV6)) &&
- (ipv6_hdr(skb)->version == 6);
-}
-
-static inline int is_with_extn_hdr(struct sk_buff *skb)
-{
- return (ipv6_hdr(skb)->nexthdr != IPPROTO_TCP) &&
- (ipv6_hdr(skb)->nexthdr != IPPROTO_UDP);
-}
-
-static inline int is_tcpudp(struct sk_buff *skb)
-{
- return (ip_hdr(skb)->protocol == IPPROTO_TCP) ||
- (ip_hdr(skb)->protocol == IPPROTO_UDP);
-}
-
-static inline u32 get_ipv4_5tuple_tag(struct sk_buff *skb)
-{
- u32 tag;
- struct iphdr *iphdr = ip_hdr(skb);
-
- tag = crc32(0, &iphdr->protocol, 1);
- tag = crc32(tag, (u8 *)&iphdr->saddr, 8);
- tag = crc32(tag, skb_transport_header(skb), 4);
- return tag;
-}
-
-static inline u32 get_ipv6_5tuple_tag(struct sk_buff *skb)
-{
- u32 tag;
- struct ipv6hdr *ipv6hdr = ipv6_hdr(skb);
-
- tag = crc32(0, &ipv6hdr->nexthdr, 1);
- tag = crc32(tag, (u8 *)&ipv6hdr->saddr, 32);
- tag = crc32(tag, skb_transport_header(skb), 4);
- return tag;
-}
-
/** \brief Transmit network packets to the Octeon interface
* @param skb sk_buff struct to be transmitted
* @param netdev pointer to network device
@@ -2670,18 +2877,22 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
struct octnic_data_pkt ndata;
struct octeon_device *oct;
struct oct_iq_stats *stats;
- int cpu = 0, status = 0;
+ struct octeon_instr_irh *irh;
+ union tx_info *tx_info;
+ int status = 0;
int q_idx = 0, iq_no = 0;
- int xmit_more;
+ int xmit_more, j;
+ u64 dptr = 0;
u32 tag = 0;
lio = GET_LIO(netdev);
oct = lio->oct_dev;
if (netif_is_multiqueue(netdev)) {
- cpu = skb->queue_mapping;
- q_idx = (cpu & (lio->linfo.num_txpciq - 1));
- iq_no = lio->linfo.txpciq[q_idx];
+ q_idx = skb->queue_mapping;
+ q_idx = (q_idx % (lio->linfo.num_txpciq));
+ tag = q_idx;
+ iq_no = lio->linfo.txpciq[q_idx].s.q_no;
} else {
iq_no = lio->txq;
}
@@ -2692,11 +2903,11 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
* transmitted.
*/
if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
- (!lio->linfo.link.s.status) ||
+ (!lio->linfo.link.s.link_up) ||
(skb->len <= 0)) {
netif_info(lio, tx_err, lio->netdev,
"Transmit failed link_status : %d\n",
- lio->linfo.link.s.status);
+ lio->linfo.link.s.link_up);
goto lio_xmit_failed;
}
@@ -2728,62 +2939,25 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
/* defer sending if queue is full */
stats->tx_iq_busy++;
netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
- ndata.q_no);
+ lio->txq);
return NETDEV_TX_BUSY;
}
}
/* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n",
- * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no );
+ * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
*/
ndata.datasize = skb->len;
cmdsetup.u64 = 0;
- cmdsetup.s.ifidx = lio->linfo.ifidx;
+ cmdsetup.s.iq_no = iq_no;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- if (is_ipv4(skb) && !is_ip_fragmented(skb) && is_tcpudp(skb)) {
- tag = get_ipv4_5tuple_tag(skb);
-
- cmdsetup.s.cksum_offset = sizeof(struct ethhdr) + 1;
-
- if (ip_hdr(skb)->ihl > 5)
- cmdsetup.s.ipv4opts_ipv6exthdr =
- OCT_PKT_PARAM_IPV4OPTS;
-
- } else if (is_ipv6(skb)) {
- tag = get_ipv6_5tuple_tag(skb);
-
- cmdsetup.s.cksum_offset = sizeof(struct ethhdr) + 1;
-
- if (is_with_extn_hdr(skb))
- cmdsetup.s.ipv4opts_ipv6exthdr =
- OCT_PKT_PARAM_IPV6EXTHDR;
-
- } else if (is_vlan(skb)) {
- if (vlan_eth_hdr(skb)->h_vlan_encapsulated_proto
- == htons(ETH_P_IP) &&
- !is_ip_fragmented(skb) && is_tcpudp(skb)) {
- tag = get_ipv4_5tuple_tag(skb);
-
- cmdsetup.s.cksum_offset =
- sizeof(struct vlan_ethhdr) + 1;
-
- if (ip_hdr(skb)->ihl > 5)
- cmdsetup.s.ipv4opts_ipv6exthdr =
- OCT_PKT_PARAM_IPV4OPTS;
-
- } else if (vlan_eth_hdr(skb)->h_vlan_encapsulated_proto
- == htons(ETH_P_IPV6)) {
- tag = get_ipv6_5tuple_tag(skb);
-
- cmdsetup.s.cksum_offset =
- sizeof(struct vlan_ethhdr) + 1;
-
- if (is_with_extn_hdr(skb))
- cmdsetup.s.ipv4opts_ipv6exthdr =
- OCT_PKT_PARAM_IPV6EXTHDR;
- }
+ if (skb->encapsulation) {
+ cmdsetup.s.tnl_csum = 1;
+ stats->tx_vxlan++;
+ } else {
+ cmdsetup.s.transport_csum = 1;
}
}
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
@@ -2793,20 +2967,21 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
if (skb_shinfo(skb)->nr_frags == 0) {
cmdsetup.s.u.datasize = skb->len;
- octnet_prepare_pci_cmd(&ndata.cmd, &cmdsetup, tag);
+ octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
+
/* Offload checksum calculation for TCP/UDP packets */
- ndata.cmd.dptr = dma_map_single(&oct->pci_dev->dev,
- skb->data,
- skb->len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&oct->pci_dev->dev, ndata.cmd.dptr)) {
+ dptr = dma_map_single(&oct->pci_dev->dev,
+ skb->data,
+ skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
__func__);
return NETDEV_TX_BUSY;
}
- finfo->dptr = ndata.cmd.dptr;
-
+ ndata.cmd.cmd2.dptr = dptr;
+ finfo->dptr = dptr;
ndata.reqtype = REQTYPE_NORESP_NET;
} else {
@@ -2814,9 +2989,10 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
struct skb_frag_struct *frag;
struct octnic_gather *g;
- spin_lock(&lio->lock);
- g = (struct octnic_gather *)list_delete_head(&lio->glist);
- spin_unlock(&lio->lock);
+ spin_lock(&lio->glist_lock[q_idx]);
+ g = (struct octnic_gather *)
+ list_delete_head(&lio->glist[q_idx]);
+ spin_unlock(&lio->glist_lock[q_idx]);
if (!g) {
netif_info(lio, tx_err, lio->netdev,
@@ -2826,7 +3002,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
cmdsetup.s.gather = 1;
cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
- octnet_prepare_pci_cmd(&ndata.cmd, &cmdsetup, tag);
+ octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
memset(g->sg, 0, g->sg_size);
@@ -2853,36 +3029,52 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
frag->size,
DMA_TO_DEVICE);
+ if (dma_mapping_error(&oct->pci_dev->dev,
+ g->sg[i >> 2].ptr[i & 3])) {
+ dma_unmap_single(&oct->pci_dev->dev,
+ g->sg[0].ptr[0],
+ skb->len - skb->data_len,
+ DMA_TO_DEVICE);
+ for (j = 1; j < i; j++) {
+ frag = &skb_shinfo(skb)->frags[j - 1];
+ dma_unmap_page(&oct->pci_dev->dev,
+ g->sg[j >> 2].ptr[j & 3],
+ frag->size,
+ DMA_TO_DEVICE);
+ }
+ dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
+ __func__);
+ return NETDEV_TX_BUSY;
+ }
+
add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
i++;
}
- ndata.cmd.dptr = dma_map_single(&oct->pci_dev->dev,
- g->sg, g->sg_size,
- DMA_TO_DEVICE);
- if (dma_mapping_error(&oct->pci_dev->dev, ndata.cmd.dptr)) {
- dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
- __func__);
- dma_unmap_single(&oct->pci_dev->dev, g->sg[0].ptr[0],
- skb->len - skb->data_len,
- DMA_TO_DEVICE);
- return NETDEV_TX_BUSY;
- }
+ dma_sync_single_for_device(&oct->pci_dev->dev, g->sg_dma_ptr,
+ g->sg_size, DMA_TO_DEVICE);
+ dptr = g->sg_dma_ptr;
- finfo->dptr = ndata.cmd.dptr;
+ ndata.cmd.cmd2.dptr = dptr;
+ finfo->dptr = dptr;
finfo->g = g;
ndata.reqtype = REQTYPE_NORESP_NET_SG;
}
- if (skb_shinfo(skb)->gso_size) {
- struct octeon_instr_irh *irh =
- (struct octeon_instr_irh *)&ndata.cmd.irh;
- union tx_info *tx_info = (union tx_info *)&ndata.cmd.ossp[0];
+ irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
+ tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
- irh->len = 1; /* to indicate that ossp[0] contains tx_info */
+ if (skb_shinfo(skb)->gso_size) {
tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
+ stats->tx_gso++;
+ }
+
+ /* HW insert VLAN tag */
+ if (skb_vlan_tag_present(skb)) {
+ irh->priority = skb_vlan_tag_get(skb) >> 13;
+ irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
}
xmit_more = skb->xmit_more;
@@ -2899,9 +3091,12 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
if (status == IQ_SEND_STOP)
stop_q(lio->netdev, q_idx);
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
- stats->tx_done++;
+ if (skb_shinfo(skb)->gso_size)
+ stats->tx_done += skb_shinfo(skb)->gso_segs;
+ else
+ stats->tx_done++;
stats->tx_tot_bytes += skb->len;
return NETDEV_TX_OK;
@@ -2910,9 +3105,10 @@ lio_xmit_failed:
stats->tx_dropped++;
netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
iq_no, stats->tx_dropped);
- dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr,
- ndata.datasize, DMA_TO_DEVICE);
- recv_buffer_free(skb);
+ if (dptr)
+ dma_unmap_single(&oct->pci_dev->dev, dptr,
+ ndata.datasize, DMA_TO_DEVICE);
+ tx_buffer_free(skb);
return NETDEV_TX_OK;
}
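
The VLAN insertion in liquidio_xmit() above splits the tag into priority and VID with shift/mask; a standalone sketch of the same split using the standard kernel helpers (illustrative, not part of this patch):

#include <linux/if_vlan.h>

/* skb_vlan_tag_get() returns the 16-bit TCI; bits 15..13 carry the
 * priority (PCP) and bits 11..0 the VLAN ID, as used above.
 */
static void split_vlan_tci(const struct sk_buff *skb, u8 *prio, u16 *vid)
{
	u16 tci = skb_vlan_tag_get(skb);

	*prio = tci >> VLAN_PRIO_SHIFT;	/* == tci >> 13 */
	*vid  = tci & VLAN_VID_MASK;	/* == tci & 0xfff */
}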
@@ -2928,31 +3124,149 @@ static void liquidio_tx_timeout(struct net_device *netdev)
netif_info(lio, tx_err, lio->netdev,
"Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
netdev->stats.tx_dropped);
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
txqs_wake(netdev);
}
-int liquidio_set_feature(struct net_device *netdev, int cmd)
+static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
+ __be16 proto __attribute__((unused)),
+ u16 vid)
{
struct lio *lio = GET_LIO(netdev);
struct octeon_device *oct = lio->oct_dev;
struct octnic_ctrl_pkt nctrl;
- struct octnic_ctrl_params nparams;
int ret = 0;
memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
nctrl.ncmd.u64 = 0;
- nctrl.ncmd.s.cmd = cmd;
- nctrl.ncmd.s.param1 = lio->linfo.ifidx;
- nctrl.ncmd.s.param2 = OCTNIC_LROIPV4 | OCTNIC_LROIPV6;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
+ nctrl.ncmd.s.param1 = vid;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.wait_time = 100;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
+ ret);
+ }
+
+ return ret;
+}
+
+static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
+ __be16 proto __attribute__((unused)),
+ u16 vid)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
+ nctrl.ncmd.s.param1 = vid;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.wait_time = 100;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
+ ret);
+ }
+ return ret;
+}
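
Both VLAN callbacks above are normally reached through the 8021q core; a hedged sketch of how a VID gets registered (VID 100 is illustrative; vlan_vid_add() is the standard helper):

#include <linux/netdevice.h>
#include <linux/if_vlan.h>

static int add_vlan_100(struct net_device *netdev)
{
	/* vlan_vid_add() records the VID and invokes the device's
	 * ndo_vlan_rx_add_vid() when NETIF_F_HW_VLAN_CTAG_FILTER is set.
	 */
	return vlan_vid_add(netdev, htons(ETH_P_8021Q), 100);
}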
+
+/** Send a command to enable/disable RX checksum offload
+ * @param netdev pointer to network device
+ * @param command OCTNET_CMD_TNL_RX_CSUM_CTL
+ * @param rx_cmd OCTNET_CMD_RXCSUM_ENABLE/
+ * OCTNET_CMD_RXCSUM_DISABLE
+ * @returns SUCCESS or FAILURE
+ */
+int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
+ u8 rx_cmd)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = command;
+ nctrl.ncmd.s.param1 = rx_cmd;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
nctrl.wait_time = 100;
nctrl.netpndev = (u64)netdev;
nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
- nparams.resp_order = OCTEON_RESP_NORESPONSE;
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev,
+ "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
+ ret);
+ }
+ return ret;
+}
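
Usage mirrors the probe-time call further below; for example, to enable RX checksum offload:

liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
			    OCTNET_CMD_RXCSUM_ENABLE);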
+
+/** Send a command to the firmware to add/delete a VxLAN UDP port
+ * @param netdev pointer to network device
+ * @param command OCTNET_CMD_VXLAN_PORT_CONFIG
+ * @param vxlan_port VxLAN port to be added or deleted
+ * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD,
+ * OCTNET_CMD_VXLAN_PORT_DEL
+ * @returns SUCCESS or FAILURE
+ */
+static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
+ u16 vxlan_port, u8 vxlan_cmd_bit)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = command;
+ nctrl.ncmd.s.more = vxlan_cmd_bit;
+ nctrl.ncmd.s.param1 = vxlan_port;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.wait_time = 100;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
+
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
+ if (ret < 0) {
+ dev_err(&oct->pci_dev->dev,
+ "VxLAN port add/delete failed in core (ret:0x%x)\n",
+ ret);
+ }
+ return ret;
+}
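
For example, adding the IANA-assigned VxLAN port 4789 to the firmware's list; the ndo_udp_tunnel_add hook below issues the equivalent call with the port taken from struct udp_tunnel_info:

liquidio_vxlan_port_command(netdev, OCTNET_CMD_VXLAN_PORT_CONFIG,
			    4789, OCTNET_CMD_VXLAN_PORT_ADD);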
+
+int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
+{
+ struct lio *lio = GET_LIO(netdev);
+ struct octeon_device *oct = lio->oct_dev;
+ struct octnic_ctrl_pkt nctrl;
+ int ret = 0;
+
+ memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
+
+ nctrl.ncmd.u64 = 0;
+ nctrl.ncmd.s.cmd = cmd;
+ nctrl.ncmd.s.param1 = param1;
+ nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
+ nctrl.wait_time = 100;
+ nctrl.netpndev = (u64)netdev;
+ nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
- ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl, nparams);
+ ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
if (ret < 0) {
dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n",
ret);
@@ -3008,14 +3322,55 @@ static int liquidio_set_features(struct net_device *netdev,
return 0;
if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO))
- liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE);
+ liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
+ OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
else if (!(features & NETIF_F_LRO) &&
(lio->dev_capability & NETIF_F_LRO))
- liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE);
+ liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
+ OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
+
+ /* Send a command to the firmware when the RX checksum offload
+ * setting is toggled via ethtool.
+ */
+ if (!(netdev->features & NETIF_F_RXCSUM) &&
+ (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
+ (features & NETIF_F_RXCSUM))
+ liquidio_set_rxcsum_command(netdev,
+ OCTNET_CMD_TNL_RX_CSUM_CTL,
+ OCTNET_CMD_RXCSUM_ENABLE);
+ else if ((netdev->features & NETIF_F_RXCSUM) &&
+ (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
+ !(features & NETIF_F_RXCSUM))
+ liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
+ OCTNET_CMD_RXCSUM_DISABLE);
return 0;
}
+static void liquidio_add_vxlan_port(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
+{
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
+
+ liquidio_vxlan_port_command(netdev,
+ OCTNET_CMD_VXLAN_PORT_CONFIG,
+ htons(ti->port),
+ OCTNET_CMD_VXLAN_PORT_ADD);
+}
+
+static void liquidio_del_vxlan_port(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
+{
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
+
+ liquidio_vxlan_port_command(netdev,
+ OCTNET_CMD_VXLAN_PORT_CONFIG,
+ htons(ti->port),
+ OCTNET_CMD_VXLAN_PORT_DEL);
+}
+
static struct net_device_ops lionetdevops = {
.ndo_open = liquidio_open,
.ndo_stop = liquidio_stop,
@@ -3024,10 +3379,15 @@ static struct net_device_ops lionetdevops = {
.ndo_set_mac_address = liquidio_set_mac,
.ndo_set_rx_mode = liquidio_set_mcast_list,
.ndo_tx_timeout = liquidio_tx_timeout,
+
+ .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid,
.ndo_change_mtu = liquidio_change_mtu,
.ndo_do_ioctl = liquidio_ioctl,
.ndo_fix_features = liquidio_fix_features,
.ndo_set_features = liquidio_set_features,
+ .ndo_udp_tunnel_add = liquidio_add_vxlan_port,
+ .ndo_udp_tunnel_del = liquidio_del_vxlan_port,
};
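
The ops table is wired up per interface during device setup; a minimal sketch of that step (the actual assignment happens in setup_nic_devices(), outside this hunk):

/* Attach the ops table, then register the device with the stack. */
netdev->netdev_ops = &lionetdevops;
if (register_netdev(netdev))
	goto setup_nic_dev_fail;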
/** \brief Entry point for the liquidio module
@@ -3082,24 +3442,27 @@ static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
{
struct octeon_device *oct = (struct octeon_device *)buf;
struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
- int ifidx = 0;
+ int gmxport = 0;
union oct_link_status *ls;
int i;
- if ((recv_pkt->buffer_size[0] != sizeof(*ls)) ||
- (recv_pkt->rh.r_nic_info.ifidx > oct->ifcount)) {
+ if (recv_pkt->buffer_size[0] != sizeof(*ls)) {
dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
recv_pkt->buffer_size[0],
- recv_pkt->rh.r_nic_info.ifidx);
+ recv_pkt->rh.r_nic_info.gmxport);
goto nic_info_err;
}
- ifidx = recv_pkt->rh.r_nic_info.ifidx;
+ gmxport = recv_pkt->rh.r_nic_info.gmxport;
ls = (union oct_link_status *)get_rbd(recv_pkt->buffer_ptr[0]);
octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
-
- update_link_status(oct->props[ifidx].netdev, ls);
+ for (i = 0; i < oct->ifcount; i++) {
+ if (oct->props[i].gmxport == gmxport) {
+ update_link_status(oct->props[i].netdev, ls);
+ break;
+ }
+ }
nic_info_err:
for (i = 0; i < recv_pkt->buffer_count; i++)
@@ -3125,13 +3488,12 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
struct liquidio_if_cfg_context *ctx;
struct liquidio_if_cfg_resp *resp;
struct octdev_props *props;
- int retval, num_iqueues, num_oqueues, q_no;
- u64 q_mask;
- int num_cpus = num_online_cpus();
+ int retval, num_iqueues, num_oqueues;
union oct_nic_if_cfg if_cfg;
unsigned int base_queue;
unsigned int gmx_port_id;
u32 resp_size, ctx_size;
+ u32 ifidx_or_pfnum;
/* This is to handle link status changes */
octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
@@ -3167,14 +3529,12 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
CFG_GET_BASE_QUE_NIC_IF(octeon_get_conf(octeon_dev), i);
gmx_port_id =
CFG_GET_GMXID_NIC_IF(octeon_get_conf(octeon_dev), i);
- if (num_iqueues > num_cpus)
- num_iqueues = num_cpus;
- if (num_oqueues > num_cpus)
- num_oqueues = num_cpus;
+ ifidx_or_pfnum = i;
+
dev_dbg(&octeon_dev->pci_dev->dev,
"requesting config for interface %d, iqs %d, oqs %d\n",
- i, num_iqueues, num_oqueues);
- ACCESS_ONCE(ctx->cond) = 0;
+ ifidx_or_pfnum, num_iqueues, num_oqueues);
+ WRITE_ONCE(ctx->cond, 0);
ctx->octeon_id = lio_get_device_id(octeon_dev);
init_waitqueue_head(&ctx->wc);
@@ -3183,16 +3543,19 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
if_cfg.s.num_oqueues = num_oqueues;
if_cfg.s.base_queue = base_queue;
if_cfg.s.gmx_port_id = gmx_port_id;
+
+ sc->iq_no = 0;
+
octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
- OPCODE_NIC_IF_CFG, i,
+ OPCODE_NIC_IF_CFG, 0,
if_cfg.u64, 0);
sc->callback = if_cfg_callback;
sc->callback_arg = sc;
- sc->wait_time = 1000;
+ sc->wait_time = 3000;
retval = octeon_send_soft_command(octeon_dev, sc);
- if (retval) {
+ if (retval == IQ_SEND_FAILED) {
dev_err(&octeon_dev->pci_dev->dev,
"iq/oq config failed status: %x\n",
retval);
@@ -3234,8 +3597,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
goto setup_nic_dev_fail;
}
- props = &octeon_dev->props[i];
- props->netdev = netdev;
+ SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
if (num_iqueues > 1)
lionetdevops.ndo_select_queue = select_q;
@@ -3249,23 +3611,21 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
memset(lio, 0, sizeof(struct lio));
- lio->linfo.ifidx = resp->cfg_info.ifidx;
- lio->ifidx = resp->cfg_info.ifidx;
+ lio->ifidx = ifidx_or_pfnum;
+
+ props = &octeon_dev->props[i];
+ props->gmxport = resp->cfg_info.linfo.gmxport;
+ props->netdev = netdev;
lio->linfo.num_rxpciq = num_oqueues;
lio->linfo.num_txpciq = num_iqueues;
- q_mask = resp->cfg_info.oqmask;
- /* q_mask is 0-based and already verified mask is nonzero */
for (j = 0; j < num_oqueues; j++) {
- q_no = __ffs64(q_mask);
- q_mask &= (~(1UL << q_no));
- lio->linfo.rxpciq[j] = q_no;
+ lio->linfo.rxpciq[j].u64 =
+ resp->cfg_info.linfo.rxpciq[j].u64;
}
- q_mask = resp->cfg_info.iqmask;
for (j = 0; j < num_iqueues; j++) {
- q_no = __ffs64(q_mask);
- q_mask &= (~(1UL << q_no));
- lio->linfo.txpciq[j] = q_no;
+ lio->linfo.txpciq[j].u64 =
+ resp->cfg_info.linfo.txpciq[j].u64;
}
lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
@@ -3274,16 +3634,41 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
lio->dev_capability = NETIF_F_HIGHDMA
- | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
- | NETIF_F_SG | NETIF_F_RXCSUM
- | NETIF_F_TSO | NETIF_F_TSO6
- | NETIF_F_LRO;
+ | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
+ | NETIF_F_SG | NETIF_F_RXCSUM
+ | NETIF_F_GRO
+ | NETIF_F_TSO | NETIF_F_TSO6
+ | NETIF_F_LRO;
netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
- netdev->features = lio->dev_capability;
+ /* Copy of the transmit encapsulation capabilities of this
+ * device: TSO, TSO6, checksums.
+ */
+ lio->enc_dev_capability = NETIF_F_IP_CSUM
+ | NETIF_F_IPV6_CSUM
+ | NETIF_F_GSO_UDP_TUNNEL
+ | NETIF_F_HW_CSUM | NETIF_F_SG
+ | NETIF_F_RXCSUM
+ | NETIF_F_TSO | NETIF_F_TSO6
+ | NETIF_F_LRO;
+
+ netdev->hw_enc_features = (lio->enc_dev_capability &
+ ~NETIF_F_LRO);
+
+ lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;
+
netdev->vlan_features = lio->dev_capability;
+ /* Add any unchangeable hw features */
+ lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX;
+
+ netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
netdev->hw_features = lio->dev_capability;
+ /* HW_VLAN_RX and HW_VLAN_FILTER are always on */
+ netdev->hw_features = netdev->hw_features &
+ ~NETIF_F_HW_VLAN_CTAG_RX;
/* Point to the properties for octeon device to which this
* interface belongs.
@@ -3291,7 +3676,6 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
lio->oct_dev = octeon_dev;
lio->octprops = props;
lio->netdev = netdev;
- spin_lock_init(&lio->lock);
dev_dbg(&octeon_dev->pci_dev->dev,
"if%d gmx: %d hw_addr: 0x%llx\n", i,
@@ -3306,23 +3690,22 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
ether_addr_copy(netdev->dev_addr, mac);
- if (setup_io_queues(octeon_dev, netdev)) {
+ /* By default all interfaces on a single Octeon use the same
+ * tx and rx queues
+ */
+ lio->txq = lio->linfo.txpciq[0].s.q_no;
+ lio->rxq = lio->linfo.rxpciq[0].s.q_no;
+ if (setup_io_queues(octeon_dev, i)) {
dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
goto setup_nic_dev_fail;
}
ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
- /* By default all interfaces on a single Octeon uses the same
- * tx and rx queues
- */
- lio->txq = lio->linfo.txpciq[0];
- lio->rxq = lio->linfo.rxpciq[0];
-
lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
- if (setup_glist(lio)) {
+ if (setup_glists(octeon_dev, lio, num_iqueues)) {
dev_err(&octeon_dev->pci_dev->dev,
"Gather list allocation failed\n");
goto setup_nic_dev_fail;
@@ -3330,11 +3713,17 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
/* Register ethtool support */
liquidio_set_ethtool_ops(netdev);
+ octeon_dev->priv_flags = 0x0;
- liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE);
+ if (netdev->features & NETIF_F_LRO)
+ liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
+ OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
+
+ liquidio_set_feature(netdev, OCTNET_CMD_ENABLE_VLAN_FILTER, 0);
if ((debug != -1) && (debug & NETIF_MSG_HW))
- liquidio_set_feature(netdev, OCTNET_CMD_VERBOSE_ENABLE);
+ liquidio_set_feature(netdev,
+ OCTNET_CMD_VERBOSE_ENABLE, 0);
/* Register the network device with the OS */
if (register_netdev(netdev)) {
@@ -3346,16 +3735,19 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
"Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
netif_carrier_off(netdev);
-
- if (lio->linfo.link.s.status) {
- netif_carrier_on(netdev);
- start_txq(netdev);
- } else {
- netif_carrier_off(netdev);
- }
+ lio->link_changes++;
ifstate_set(lio, LIO_IFSTATE_REGISTERED);
+ /* Send a command to the firmware to enable RX checksum offload
+ * by default when the LiquidIO driver is set up for
+ * this device.
+ */
+ liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
+ OCTNET_CMD_RXCSUM_ENABLE);
+ liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
+ OCTNET_CMD_TXCSUM_ENABLE);
+
dev_dbg(&octeon_dev->pci_dev->dev,
"NIC ifidx:%d Setup successful\n", i);
@@ -3386,7 +3778,7 @@ setup_nic_dev_fail:
static int liquidio_init_nic_module(struct octeon_device *oct)
{
struct oct_intrmod_cfg *intrmod_cfg;
- int retval = 0;
+ int i, retval = 0;
int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
@@ -3400,6 +3792,9 @@ static int liquidio_init_nic_module(struct octeon_device *oct)
memset(oct->props, 0,
sizeof(struct octdev_props) * num_nic_ports);
+ for (i = 0; i < MAX_OCTEON_LINKS; i++)
+ oct->props[i].gmxport = -1;
+
retval = setup_nic_devices(oct);
if (retval) {
dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
@@ -3410,15 +3805,19 @@ static int liquidio_init_nic_module(struct octeon_device *oct)
/* Initialize interrupt moderation params */
intrmod_cfg = &((struct octeon_device *)oct)->intrmod;
- intrmod_cfg->intrmod_enable = 1;
- intrmod_cfg->intrmod_check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
- intrmod_cfg->intrmod_maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR;
- intrmod_cfg->intrmod_minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR;
- intrmod_cfg->intrmod_maxcnt_trigger = LIO_INTRMOD_MAXCNT_TRIGGER;
- intrmod_cfg->intrmod_maxtmr_trigger = LIO_INTRMOD_MAXTMR_TRIGGER;
- intrmod_cfg->intrmod_mintmr_trigger = LIO_INTRMOD_MINTMR_TRIGGER;
- intrmod_cfg->intrmod_mincnt_trigger = LIO_INTRMOD_MINCNT_TRIGGER;
-
+ intrmod_cfg->rx_enable = 1;
+ intrmod_cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
+ intrmod_cfg->maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR;
+ intrmod_cfg->minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR;
+ intrmod_cfg->rx_maxcnt_trigger = LIO_INTRMOD_RXMAXCNT_TRIGGER;
+ intrmod_cfg->rx_maxtmr_trigger = LIO_INTRMOD_RXMAXTMR_TRIGGER;
+ intrmod_cfg->rx_mintmr_trigger = LIO_INTRMOD_RXMINTMR_TRIGGER;
+ intrmod_cfg->rx_mincnt_trigger = LIO_INTRMOD_RXMINCNT_TRIGGER;
+ intrmod_cfg->tx_enable = 1;
+ intrmod_cfg->tx_maxcnt_trigger = LIO_INTRMOD_TXMAXCNT_TRIGGER;
+ intrmod_cfg->tx_mincnt_trigger = LIO_INTRMOD_TXMINCNT_TRIGGER;
+ intrmod_cfg->rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
+ intrmod_cfg->rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
return retval;
@@ -3481,6 +3880,7 @@ static void nic_starter(struct work_struct *work)
static int octeon_device_init(struct octeon_device *octeon_dev)
{
int j, ret;
+ char bootcmd[] = "\n";
struct octeon_device_priv *oct_priv =
(struct octeon_device_priv *)octeon_dev->priv;
atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
@@ -3558,6 +3958,7 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
/* Release any previously allocated queues */
for (j = 0; j < octeon_dev->num_oqs; j++)
octeon_delete_droq(octeon_dev, j);
+ return 1;
}
atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
@@ -3580,7 +3981,8 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
/* Setup the interrupt handler and record the INT SUM register address
*/
- octeon_setup_interrupt(octeon_dev);
+ if (octeon_setup_interrupt(octeon_dev))
+ return 1;
/* Enable Octeon device interrupts */
octeon_dev->fn_list.enable_interrupt(octeon_dev->chip);
@@ -3592,14 +3994,19 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
- if (ddr_timeout == 0) {
- dev_info(&octeon_dev->pci_dev->dev,
- "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
- }
+ if (ddr_timeout == 0)
+ dev_info(&octeon_dev->pci_dev->dev, "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
/* Wait for the octeon to initialize DDR after the soft-reset. */
+ while (ddr_timeout == 0) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ if (schedule_timeout(HZ / 10)) {
+ /* user probably pressed Control-C */
+ return 1;
+ }
+ }
ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
if (ret) {
dev_err(&octeon_dev->pci_dev->dev,
@@ -3613,6 +4020,9 @@ static int octeon_device_init(struct octeon_device *octeon_dev)
return 1;
}
+ /* Divert U-Boot to take commands from the host instead. */
+ ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
+
dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
ret = octeon_init_consoles(octeon_dev);
if (ret) {
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
index 0ac347ccc8ba..199a8b9c7dc5 100644
--- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
@@ -30,10 +30,10 @@
#include "octeon_config.h"
-#define LIQUIDIO_VERSION "1.1.9"
-#define LIQUIDIO_MAJOR_VERSION 1
-#define LIQUIDIO_MINOR_VERSION 1
-#define LIQUIDIO_MICRO_VERSION 9
+#define LIQUIDIO_BASE_VERSION "1.4"
+#define LIQUIDIO_MICRO_VERSION ".1"
+#define LIQUIDIO_PACKAGE ""
+#define LIQUIDIO_VERSION "1.4.1"
#define CONTROL_IQ 0
/** Tag types used by Octeon cores in its work. */
@@ -174,9 +174,11 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
/*------------------------- End Scatter/Gather ---------------------------*/
#define OCTNET_FRM_PTP_HEADER_SIZE 8
-#define OCTNET_FRM_HEADER_SIZE 30 /* PTP timestamp + VLAN + Ethernet */
-#define OCTNET_MIN_FRM_SIZE (64 + OCTNET_FRM_PTP_HEADER_SIZE)
+#define OCTNET_FRM_HEADER_SIZE 22 /* VLAN + Ethernet */
+
+#define OCTNET_MIN_FRM_SIZE 64
+
#define OCTNET_MAX_FRM_SIZE (16000 + OCTNET_FRM_HEADER_SIZE)
#define OCTNET_DEFAULT_FRM_SIZE (1500 + OCTNET_FRM_HEADER_SIZE)
@@ -212,6 +214,17 @@ static inline void add_sg_size(struct octeon_sg_entry *sg_entry,
#define OCTNET_CMD_VERBOSE_ENABLE 0x14
#define OCTNET_CMD_VERBOSE_DISABLE 0x15
+#define OCTNET_CMD_ENABLE_VLAN_FILTER 0x16
+#define OCTNET_CMD_ADD_VLAN_FILTER 0x17
+#define OCTNET_CMD_DEL_VLAN_FILTER 0x18
+#define OCTNET_CMD_VXLAN_PORT_CONFIG 0x19
+#define OCTNET_CMD_VXLAN_PORT_ADD 0x0
+#define OCTNET_CMD_VXLAN_PORT_DEL 0x1
+#define OCTNET_CMD_RXCSUM_ENABLE 0x0
+#define OCTNET_CMD_RXCSUM_DISABLE 0x1
+#define OCTNET_CMD_TXCSUM_ENABLE 0x0
+#define OCTNET_CMD_TXCSUM_DISABLE 0x1
+
/* RX(packets coming from wire) Checksum verification flags */
/* TCP/UDP csum */
#define CNNIC_L4SUM_VERIFIED 0x1
@@ -258,19 +271,19 @@ union octnet_cmd {
u64 more:6; /* How many udd words follow the command */
- u64 param1:29;
+ u64 reserved:29;
- u64 param2:16;
+ u64 param1:16;
- u64 param3:8;
+ u64 param2:8;
#else
- u64 param3:8;
+ u64 param2:8;
- u64 param2:16;
+ u64 param1:16;
- u64 param1:29;
+ u64 reserved:29;
u64 more:6;
@@ -283,8 +296,140 @@ union octnet_cmd {
#define OCTNET_CMD_SIZE (sizeof(union octnet_cmd))
+/* Instruction Header(DPI) - for OCTEON-III models */
+struct octeon_instr_ih3 {
+#ifdef __BIG_ENDIAN_BITFIELD
+
+ /** Reserved3 */
+ u64 reserved3:1;
+
+ /** Gather indicator 1=gather*/
+ u64 gather:1;
+
+ /** Data length OR no. of entries in gather list */
+ u64 dlengsz:14;
+
+ /** Front Data size */
+ u64 fsz:6;
+
+ /** Reserved2 */
+ u64 reserved2:4;
+
+ /** PKI port kind - PKIND */
+ u64 pkind:6;
+
+ /** Reserved1 */
+ u64 reserved1:32;
+
+#else
+ /** Reserved1 */
+ u64 reserved1:32;
+
+ /** PKI port kind - PKIND */
+ u64 pkind:6;
+
+ /** Reserved2 */
+ u64 reserved2:4;
+
+ /** Front Data size */
+ u64 fsz:6;
+
+ /** Data length OR no. of entries in gather list */
+ u64 dlengsz:14;
+
+ /** Gather indicator 1=gather*/
+ u64 gather:1;
+
+ /** Reserved3 */
+ u64 reserved3:1;
+
+#endif
+};
+
+/* Optional PKI Instruction Header(PKI IH) - for OCTEON-III models */
+/** BIG ENDIAN format. */
+struct octeon_instr_pki_ih3 {
+#ifdef __BIG_ENDIAN_BITFIELD
+
+ /** Wider bit */
+ u64 w:1;
+
+ /** Raw mode indicator 1 = RAW */
+ u64 raw:1;
+
+ /** Use Tag */
+ u64 utag:1;
+
+ /** Use QPG */
+ u64 uqpg:1;
+
+ /** Reserved2 */
+ u64 reserved2:1;
+
+ /** Parse Mode */
+ u64 pm:3;
+
+ /** Skip Length */
+ u64 sl:8;
+
+ /** Use Tag Type */
+ u64 utt:1;
+
+ /** Tag type */
+ u64 tagtype:2;
+
+ /** Reserved1 */
+ u64 reserved1:2;
+
+ /** QPG Value */
+ u64 qpg:11;
+
+ /** Tag Value */
+ u64 tag:32;
+
+#else
+
+ /** Tag Value */
+ u64 tag:32;
+
+ /** QPG Value */
+ u64 qpg:11;
+
+ /** Reserved1 */
+ u64 reserved1:2;
+
+ /** Tag type */
+ u64 tagtype:2;
+
+ /** Use Tag Type */
+ u64 utt:1;
+
+ /** Skip Length */
+ u64 sl:8;
+
+ /** Parse Mode */
+ u64 pm:3;
+
+ /** Reserved2 */
+ u64 reserved2:1;
+
+ /** Use QPG */
+ u64 uqpg:1;
+
+ /** Use Tag */
+ u64 utag:1;
+
+ /** Raw mode indicator 1 = RAW */
+ u64 raw:1;
+
+ /** Wider bit */
+ u64 w:1;
+#endif
+
+};
+
/** Instruction Header */
-struct octeon_instr_ih {
+struct octeon_instr_ih2 {
#ifdef __BIG_ENDIAN_BITFIELD
/** Raw mode indicator 1 = RAW */
u64 raw:1;
@@ -348,15 +493,15 @@ struct octeon_instr_irh {
u64 opcode:4;
u64 rflag:1;
u64 subcode:7;
- u64 len:3;
- u64 rid:13;
- u64 reserved:4;
+ u64 vlan:12;
+ u64 priority:3;
+ u64 reserved:5;
u64 ossp:32; /* opcode/subcode specific parameters */
#else
u64 ossp:32; /* opcode/subcode specific parameters */
- u64 reserved:4;
- u64 rid:13;
- u64 len:3;
+ u64 reserved:5;
+ u64 priority:3;
+ u64 vlan:12;
u64 subcode:7;
u64 rflag:1;
u64 opcode:4;
@@ -383,75 +528,77 @@ union octeon_rh {
struct {
u64 opcode:4;
u64 subcode:8;
- u64 len:3; /** additional 64-bit words */
- u64 rid:13; /** request id in response to pkt sent by host */
- u64 reserved:4;
- u64 ossp:32; /** opcode/subcode specific parameters */
+ u64 len:3; /** additional 64-bit words */
+ u64 reserved:17;
+ u64 ossp:32; /** opcode/subcode specific parameters */
} r;
struct {
u64 opcode:4;
u64 subcode:8;
- u64 len:3; /** additional 64-bit words */
- u64 rid:13; /** request id in response to pkt sent by host */
- u64 extra:24;
- u64 link:8;
+ u64 len:3; /** additional 64-bit words */
+ u64 extra:28;
+ u64 vlan:12;
+ u64 priority:3;
u64 csum_verified:3; /** checksum verified. */
u64 has_hwtstamp:1; /** Has hardware timestamp. 1 = yes. */
+ u64 encap_on:1;
+ u64 has_hash:1; /** Has hash (rth or rss). 1 = yes. */
} r_dh;
struct {
u64 opcode:4;
u64 subcode:8;
- u64 len:3; /** additional 64-bit words */
- u64 rid:13; /** request id in response to pkt sent by host */
+ u64 len:3; /** additional 64-bit words */
+ u64 reserved:11;
u64 num_gmx_ports:8;
- u64 max_nic_ports:8;
+ u64 max_nic_ports:10;
u64 app_cap_flags:4;
- u64 app_mode:16;
+ u64 app_mode:8;
+ u64 pkind:8;
} r_core_drv_init;
struct {
u64 opcode:4;
u64 subcode:8;
u64 len:3; /** additional 64-bit words */
- u64 rid:13;
- u64 reserved:4;
+ u64 reserved:8;
u64 extra:25;
- u64 ifidx:7;
+ u64 gmxport:16;
} r_nic_info;
#else
u64 u64;
struct {
u64 ossp:32; /** opcode/subcode specific parameters */
- u64 reserved:4;
- u64 rid:13; /** req id in response to pkt sent by host */
+ u64 reserved:17;
u64 len:3; /** additional 64-bit words */
u64 subcode:8;
u64 opcode:4;
} r;
struct {
+ u64 has_hash:1; /** Has hash (rth or rss). 1 = yes. */
+ u64 encap_on:1;
u64 has_hwtstamp:1; /** 1 = has hwtstamp */
u64 csum_verified:3; /** checksum verified. */
- u64 link:8;
- u64 extra:24;
- u64 rid:13; /** req id in response to pkt sent by host */
+ u64 priority:3;
+ u64 vlan:12;
+ u64 extra:28;
u64 len:3; /** additional 64-bit words */
u64 subcode:8;
u64 opcode:4;
} r_dh;
struct {
- u64 app_mode:16;
+ u64 pkind:8;
+ u64 app_mode:8;
u64 app_cap_flags:4;
- u64 max_nic_ports:8;
+ u64 max_nic_ports:10;
u64 num_gmx_ports:8;
- u64 rid:13;
+ u64 reserved:11;
u64 len:3; /** additional 64-bit words */
u64 subcode:8;
u64 opcode:4;
} r_core_drv_init;
struct {
- u64 ifidx:7;
+ u64 gmxport:16;
u64 extra:25;
- u64 reserved:4;
- u64 rid:13;
+ u64 reserved:8;
u64 len:3; /** additional 64-bit words */
u64 subcode:8;
u64 opcode:4;
@@ -461,30 +608,25 @@ union octeon_rh {
#define OCT_RH_SIZE (sizeof(union octeon_rh))
-#define OCT_PKT_PARAM_IPV4OPTS 1
-#define OCT_PKT_PARAM_IPV6EXTHDR 2
-
union octnic_packet_params {
u32 u32;
struct {
#ifdef __BIG_ENDIAN_BITFIELD
- u32 reserved:6;
+ u32 reserved:24;
+ u32 ip_csum:1; /* Perform IP header checksum(s) */
+ /* Perform Outer transport header checksum */
+ u32 transport_csum:1;
+ /* Find tunnel, and perform transport csum. */
u32 tnl_csum:1;
- u32 ip_csum:1;
- u32 ipv4opts_ipv6exthdr:2;
- u32 ipsec_ops:4;
- u32 tsflag:1;
- u32 csoffset:9;
- u32 ifidx:8;
+ u32 tsflag:1; /* Timestamp this packet */
+ u32 ipsec_ops:4; /* IPsec operation */
#else
- u32 ifidx:8;
- u32 csoffset:9;
- u32 tsflag:1;
u32 ipsec_ops:4;
- u32 ipv4opts_ipv6exthdr:2;
- u32 ip_csum:1;
+ u32 tsflag:1;
u32 tnl_csum:1;
- u32 reserved:6;
+ u32 transport_csum:1;
+ u32 ip_csum:1;
+ u32 reserved:24;
#endif
} s;
};
@@ -496,56 +638,96 @@ union oct_link_status {
struct {
#ifdef __BIG_ENDIAN_BITFIELD
u64 duplex:8;
- u64 status:8;
u64 mtu:16;
u64 speed:16;
+ u64 link_up:1;
u64 autoneg:1;
- u64 interface:4;
+ u64 if_mode:5;
u64 pause:1;
- u64 reserved:10;
+ u64 flashing:1;
+ u64 reserved:15;
#else
- u64 reserved:10;
+ u64 reserved:15;
+ u64 flashing:1;
u64 pause:1;
- u64 interface:4;
+ u64 if_mode:5;
u64 autoneg:1;
+ u64 link_up:1;
u64 speed:16;
u64 mtu:16;
- u64 status:8;
u64 duplex:8;
#endif
} s;
};
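
With the old 8-bit status field reduced to a single link_up bit, callers test it directly; a sketch of the carrier handling this enables (illustrative, mirroring the driver changes above):

/* Mirror firmware link state onto the kernel carrier. */
if (lio->linfo.link.s.link_up)
	netif_carrier_on(netdev);
else
	netif_carrier_off(netdev);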
+/** The txpciq info passed to the host from the firmware */
+
+union oct_txpciq {
+ u64 u64;
+
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 q_no:8;
+ u64 port:8;
+ u64 pkind:6;
+ u64 use_qpg:1;
+ u64 qpg:11;
+ u64 reserved:30;
+#else
+ u64 reserved:30;
+ u64 qpg:11;
+ u64 use_qpg:1;
+ u64 pkind:6;
+ u64 port:8;
+ u64 q_no:8;
+#endif
+ } s;
+};
+
+/** The rxpciq info passed to the host from the firmware */
+
+union oct_rxpciq {
+ u64 u64;
+
+ struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u64 q_no:8;
+ u64 reserved:56;
+#else
+ u64 reserved:56;
+ u64 q_no:8;
+#endif
+ } s;
+};
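
Queue numbers now arrive packed in these unions, so consumers extract the q_no field, as the driver-side changes above do:

/* Resolve the PCI queue numbers for queue index i. */
u32 txq = lio->linfo.txpciq[i].s.q_no;
u32 rxq = lio->linfo.rxpciq[i].s.q_no;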
+
/** Information for an OCTEON Ethernet interface shared between core & host. */
struct oct_link_info {
union oct_link_status link;
u64 hw_addr;
#ifdef __BIG_ENDIAN_BITFIELD
- u16 gmxport;
- u8 rsvd[3];
- u8 num_txpciq;
- u8 num_rxpciq;
- u8 ifidx;
+ u64 gmxport:16;
+ u64 rsvd:32;
+ u64 num_txpciq:8;
+ u64 num_rxpciq:8;
#else
- u8 ifidx;
- u8 num_rxpciq;
- u8 num_txpciq;
- u8 rsvd[3];
- u16 gmxport;
+ u64 num_rxpciq:8;
+ u64 num_txpciq:8;
+ u64 rsvd:32;
+ u64 gmxport:16;
#endif
- u8 txpciq[MAX_IOQS_PER_NICIF];
- u8 rxpciq[MAX_IOQS_PER_NICIF];
+ union oct_txpciq txpciq[MAX_IOQS_PER_NICIF];
+ union oct_rxpciq rxpciq[MAX_IOQS_PER_NICIF];
};
#define OCT_LINK_INFO_SIZE (sizeof(struct oct_link_info))
struct liquidio_if_cfg_info {
- u64 ifidx;
u64 iqmask; /** mask for IQs enabled for the port */
u64 oqmask; /** mask for OQs enabled for the port */
struct oct_link_info linfo; /** initial link information */
+ char liquidio_firmware_version[32];
};
/** Stats for each NIC port in RX direction. */
@@ -570,10 +752,18 @@ struct nic_rx_stats {
u64 fw_err_pko;
u64 fw_err_link;
u64 fw_err_drop;
+ u64 fw_rx_vxlan;
+ u64 fw_rx_vxlan_err;
+
+ /* LRO */
u64 fw_lro_pkts; /* Number of packets that are LROed */
u64 fw_lro_octs; /* Number of octets that are LROed */
u64 fw_total_lro; /* Number of LRO packets formed */
u64 fw_lro_aborts; /* Number of times LRO of a packet was aborted */
+ u64 fw_lro_aborts_port;
+ u64 fw_lro_aborts_seq;
+ u64 fw_lro_aborts_tsval;
+ u64 fw_lro_aborts_timer;
/* intrmod: packet forward rate */
u64 fwd_rate;
};
@@ -597,9 +787,14 @@ struct nic_tx_stats {
/* firmware stats */
u64 fw_total_sent;
u64 fw_total_fwd;
+ u64 fw_total_fwd_bytes;
u64 fw_err_pko;
u64 fw_err_link;
u64 fw_err_drop;
+ u64 fw_err_tso;
+ u64 fw_tso; /* number of tso requests */
+ u64 fw_tso_fwd; /* number of packets segmented in tso */
+ u64 fw_tx_vxlan;
};
struct oct_link_stats {
@@ -630,23 +825,44 @@ struct oct_mdio_cmd {
#define OCT_LINK_STATS_SIZE (sizeof(struct oct_link_stats))
+/* intrmod: max. packet rate threshold */
+#define LIO_INTRMOD_MAXPKT_RATETHR 196608
+/* intrmod: min. packet rate threshold */
+#define LIO_INTRMOD_MINPKT_RATETHR 9216
+/* intrmod: max. packets to trigger interrupt */
+#define LIO_INTRMOD_RXMAXCNT_TRIGGER 384
+/* intrmod: min. packets to trigger interrupt */
+#define LIO_INTRMOD_RXMINCNT_TRIGGER 1
+/* intrmod: max. time to trigger interrupt */
+#define LIO_INTRMOD_RXMAXTMR_TRIGGER 128
+/* 66xx:intrmod: min. time to trigger interrupt
+ * (value of 1 is optimum for TCP_RR)
+ */
+#define LIO_INTRMOD_RXMINTMR_TRIGGER 1
+
+/* intrmod: max. packets to trigger interrupt */
+#define LIO_INTRMOD_TXMAXCNT_TRIGGER 64
+/* intrmod: min. packets to trigger interrupt */
+#define LIO_INTRMOD_TXMINCNT_TRIGGER 0
+
+/* intrmod: poll interval in seconds */
#define LIO_INTRMOD_CHECK_INTERVAL 1
-#define LIO_INTRMOD_MAXPKT_RATETHR 196608 /* max pkt rate threshold */
-#define LIO_INTRMOD_MINPKT_RATETHR 9216 /* min pkt rate threshold */
-#define LIO_INTRMOD_MAXCNT_TRIGGER 384 /* max pkts to trigger interrupt */
-#define LIO_INTRMOD_MINCNT_TRIGGER 1 /* min pkts to trigger interrupt */
-#define LIO_INTRMOD_MAXTMR_TRIGGER 128 /* max time to trigger interrupt */
-#define LIO_INTRMOD_MINTMR_TRIGGER 32 /* min time to trigger interrupt */
struct oct_intrmod_cfg {
- u64 intrmod_enable;
- u64 intrmod_check_intrvl;
- u64 intrmod_maxpkt_ratethr;
- u64 intrmod_minpkt_ratethr;
- u64 intrmod_maxcnt_trigger;
- u64 intrmod_maxtmr_trigger;
- u64 intrmod_mincnt_trigger;
- u64 intrmod_mintmr_trigger;
+ u64 rx_enable;
+ u64 tx_enable;
+ u64 check_intrvl;
+ u64 maxpkt_ratethr;
+ u64 minpkt_ratethr;
+ u64 rx_maxcnt_trigger;
+ u64 rx_mincnt_trigger;
+ u64 rx_maxtmr_trigger;
+ u64 rx_mintmr_trigger;
+ u64 tx_mincnt_trigger;
+ u64 tx_maxcnt_trigger;
+ u64 rx_frames;
+ u64 tx_frames;
+ u64 rx_usecs;
};
#define BASE_QUEUE_NOT_REQUESTED 65535
@@ -659,9 +875,9 @@ union oct_nic_if_cfg {
u64 num_iqueues:16;
u64 num_oqueues:16;
u64 gmx_port_id:8;
- u64 reserved:8;
+ u64 vf_id:8;
#else
- u64 reserved:8;
+ u64 vf_id:8;
u64 gmx_port_id:8;
u64 num_oqueues:16;
u64 num_iqueues:16;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
index 62a8dd5cd3dc..b3396e3a8bab 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_config.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
@@ -37,7 +37,7 @@
/* Maximum number of Octeon devices, defined as MAX_OCTEON_NICIF to
* support multiple (<= MAX_OCTEON_NICIF) miniports
*/
-#define MAX_OCTEON_NICIF 32
+#define MAX_OCTEON_NICIF 128
#define MAX_OCTEON_DEVICES MAX_OCTEON_NICIF
#define MAX_OCTEON_LINKS MAX_OCTEON_NICIF
#define MAX_OCTEON_MULTICAST_ADDR 32
@@ -135,7 +135,7 @@
#define CFG_GET_IS_SLI_BP_ON(cfg) ((cfg)->misc.enable_sli_oq_bp)
/* Max IOQs per OCTEON Link */
-#define MAX_IOQS_PER_NICIF 32
+#define MAX_IOQS_PER_NICIF 64
enum lio_card_type {
LIO_210SV = 0, /* Two port, 66xx */
@@ -226,7 +226,7 @@ struct octeon_oq_config {
*/
u64 refill_threshold:16;
- /** If set, the Output queue uses info-pointer mode. (Default: 1 ) */
+ /** If set, the Output queue uses info-pointer mode. (Default: 1) */
u64 info_ptr:32;
/* Max number of OQs available */
@@ -236,7 +236,7 @@ struct octeon_oq_config {
/* Max number of OQs available */
u64 max_oqs:8;
- /** If set, the Output queue uses info-pointer mode. (Default: 1 ) */
+ /** If set, the Output queue uses info-pointer mode. (Default: 1) */
u64 info_ptr:32;
/** The number of buffers that were consumed during packet processing by
@@ -416,9 +416,11 @@ struct octeon_config {
#define DISPATCH_LIST_SIZE BIT(OPCODE_MASK_BITS)
/* Maximum number of Octeon Instruction (command) queues */
-#define MAX_OCTEON_INSTR_QUEUES CN6XXX_MAX_INPUT_QUEUES
+#define MAX_OCTEON_INSTR_QUEUES(oct) CN6XXX_MAX_INPUT_QUEUES
+/* Maximum number of Octeon Output queues */
+#define MAX_OCTEON_OUTPUT_QUEUES(oct) CN6XXX_MAX_OUTPUT_QUEUES
-/* Maximum number of Octeon Instruction (command) queues */
-#define MAX_OCTEON_OUTPUT_QUEUES CN6XXX_MAX_OUTPUT_QUEUES
+#define MAX_POSSIBLE_OCTEON_INSTR_QUEUES CN6XXX_MAX_INPUT_QUEUES
+#define MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES CN6XXX_MAX_OUTPUT_QUEUES
#endif /* __OCTEON_CONFIG_H__ */
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_console.c b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
index 466147e409c9..bbb50ea66f16 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_console.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_console.c
@@ -23,27 +23,14 @@
/**
* @file octeon_console.c
*/
-#include <linux/version.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
#include <linux/pci.h>
-#include <linux/kthread.h>
#include <linux/netdevice.h>
-#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
-#include "octeon_nic.h"
#include "octeon_main.h"
-#include "octeon_network.h"
-#include "cn66xx_regs.h"
-#include "cn66xx_device.h"
-#include "cn68xx_regs.h"
-#include "cn68xx_device.h"
-#include "liquidio_image.h"
#include "octeon_mem_ops.h"
static void octeon_remote_lock(void);
@@ -51,6 +38,8 @@ static void octeon_remote_unlock(void);
static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct,
const char *name,
u32 flags);
+static int octeon_console_read(struct octeon_device *oct, u32 console_num,
+ char *buffer, u32 buf_size);
#define MIN(a, b) min((a), (b))
#define CAST_ULL(v) ((u64)(v))
@@ -170,8 +159,8 @@ struct octeon_pci_console_desc {
offsetof(struct cvmx_bootmem_desc, field), \
SIZEOF_FIELD(struct cvmx_bootmem_desc, field))
-#define __cvmx_bootmem_lock(flags)
-#define __cvmx_bootmem_unlock(flags)
+#define __cvmx_bootmem_lock(flags) (flags = flags)
+#define __cvmx_bootmem_unlock(flags) (flags = flags)
/**
* This macro returns a member of the
@@ -234,7 +223,7 @@ static void CVMX_BOOTMEM_NAMED_GET_NAME(struct octeon_device *oct,
u32 len)
{
addr += offsetof(struct cvmx_bootmem_named_block_desc, name);
- octeon_pci_read_core_mem(oct, addr, str, len);
+ octeon_pci_read_core_mem(oct, addr, (u8 *)str, len);
str[len] = 0;
}
@@ -323,6 +312,9 @@ static u64 cvmx_bootmem_phy_named_block_find(struct octeon_device *oct,
if (name && named_size) {
char *name_tmp =
kmalloc(name_length + 1, GFP_KERNEL);
+ if (!name_tmp)
+ break;
+
CVMX_BOOTMEM_NAMED_GET_NAME(oct, named_addr,
name_tmp,
name_length);
@@ -383,7 +375,7 @@ static void octeon_remote_unlock(void)
int octeon_console_send_cmd(struct octeon_device *oct, char *cmd_str,
u32 wait_hundredths)
{
- u32 len = strlen(cmd_str);
+ u32 len = (u32)strlen(cmd_str);
dev_dbg(&oct->pci_dev->dev, "sending \"%s\" to bootloader\n", cmd_str);
@@ -440,8 +432,7 @@ int octeon_wait_for_bootloader(struct octeon_device *oct,
}
static void octeon_console_handle_result(struct octeon_device *oct,
- size_t console_num,
- char *buffer, s32 bytes_read)
+ size_t console_num)
{
struct octeon_console *console;
@@ -492,7 +483,7 @@ static void check_console(struct work_struct *work)
struct octeon_console *console;
struct cavium_wk *wk = (struct cavium_wk *)work;
struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
- size_t console_num = wk->ctxul;
+ u32 console_num = (u32)wk->ctxul;
u32 delay;
console = &oct->console[console_num];
@@ -505,20 +496,17 @@ static void check_console(struct work_struct *work)
*/
bytes_read =
octeon_console_read(oct, console_num, console_buffer,
- sizeof(console_buffer) - 1, 0);
+ sizeof(console_buffer) - 1);
if (bytes_read > 0) {
total_read += bytes_read;
- if (console->waiting) {
- octeon_console_handle_result(oct, console_num,
- console_buffer,
- bytes_read);
- }
+ if (console->waiting)
+ octeon_console_handle_result(oct, console_num);
if (octeon_console_debug_enabled(console_num)) {
output_console_line(oct, console, console_num,
console_buffer, bytes_read);
}
} else if (bytes_read < 0) {
- dev_err(&oct->pci_dev->dev, "Error reading console %lu, ret=%d\n",
+ dev_err(&oct->pci_dev->dev, "Error reading console %u, ret=%d\n",
console_num, bytes_read);
}
@@ -530,7 +518,7 @@ static void check_console(struct work_struct *work)
*/
if (octeon_console_debug_enabled(console_num) &&
(total_read == 0) && (console->leftover[0])) {
- dev_info(&oct->pci_dev->dev, "%lu: %s\n",
+ dev_info(&oct->pci_dev->dev, "%u: %s\n",
console_num, console->leftover);
console->leftover[0] = '\0';
}
@@ -675,8 +663,8 @@ static inline int octeon_console_avail_bytes(u32 buffer_size,
octeon_console_free_bytes(buffer_size, wr_idx, rd_idx);
}
-int octeon_console_read(struct octeon_device *oct, u32 console_num,
- char *buffer, u32 buf_size, u32 flags)
+static int octeon_console_read(struct octeon_device *oct, u32 console_num,
+ char *buffer, u32 buf_size)
{
int bytes_to_read;
u32 rd_idx, wr_idx;
@@ -712,7 +700,7 @@ int octeon_console_read(struct octeon_device *oct, u32 console_num,
bytes_to_read = console->buffer_size - rd_idx;
octeon_pci_read_core_mem(oct, console->output_base_addr + rd_idx,
- buffer, bytes_to_read);
+ (u8 *)buffer, bytes_to_read);
octeon_write_device_mem32(oct, console->addr +
offsetof(struct octeon_pci_console,
output_read_index),
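
For reference, the circular-buffer arithmetic behind the console read path above, as a standalone sketch. It mirrors the driver's avail/free helpers (one slot is always left empty so a full ring is distinguishable from an empty one); the helper names here are illustrative.

	#include <stdint.h>

	typedef uint32_t u32;

	/* Free space between the host read index and the card write index;
	 * the u32 subtraction wraps correctly when wr_idx < rd_idx. */
	static int console_free_bytes(u32 buffer_size, u32 wr_idx, u32 rd_idx)
	{
		if (rd_idx >= buffer_size || wr_idx >= buffer_size)
			return -1;	/* corrupt indices, nothing is trustworthy */

		return ((buffer_size - 1) - (wr_idx - rd_idx)) % buffer_size;
	}

	/* Bytes ready to read = usable capacity minus free space. */
	static int console_avail_bytes(u32 buffer_size, u32 wr_idx, u32 rd_idx)
	{
		int free_bytes = console_free_bytes(buffer_size, wr_idx, rd_idx);

		return (free_bytes < 0) ? free_bytes
					: (int)(buffer_size - 1) - free_bytes;
	}
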
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index f67641a2ff9e..0eb504a4379a 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -19,28 +19,19 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
-#include <linux/version.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/crc32.h>
-#include <linux/kthread.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
-#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
-#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
-#include "cn68xx_regs.h"
-#include "cn68xx_device.h"
#include "liquidio_image.h"
#include "octeon_mem_ops.h"
@@ -449,10 +440,10 @@ static struct octeon_config_ptr {
};
static char oct_dev_state_str[OCT_DEV_STATES + 1][32] = {
- "BEGIN", "PCI-MAP-DONE", "DISPATCH-INIT-DONE",
+ "BEGIN", "PCI-MAP-DONE", "DISPATCH-INIT-DONE",
"IQ-INIT-DONE", "SCBUFF-POOL-INIT-DONE", "RESPLIST-INIT-DONE",
"DROQ-INIT-DONE", "IO-QUEUES-INIT-DONE", "CONSOLE-INIT-DONE",
- "HOST-READY", "CORE-READY", "RUNNING", "IN-RESET",
+ "HOST-READY", "CORE-READY", "RUNNING", "IN-RESET",
"INVALID"
};
@@ -550,17 +541,19 @@ static char *get_oct_app_string(u32 app_mode)
return oct_dev_app_str[CVM_DRV_INVALID_APP - CVM_DRV_APP_START];
}
+u8 fbuf[4 * 1024 * 1024];
+
int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
size_t size)
{
int ret = 0;
- u8 *p;
- u8 *buffer;
+ u8 *p = fbuf;
u32 crc32_result;
u64 load_addr;
u32 image_len;
struct octeon_firmware_file_header *h;
- u32 i;
+ u32 i, rem, base_len = strlen(LIQUIDIO_BASE_VERSION);
+ char *base;
if (size < sizeof(struct octeon_firmware_file_header)) {
dev_err(&oct->pci_dev->dev, "Firmware file too small (%d < %d).\n",
@@ -576,19 +569,26 @@ int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
return -EINVAL;
}
- crc32_result =
- crc32(~0, data,
- sizeof(struct octeon_firmware_file_header) -
- sizeof(u32)) ^ ~0U;
+ crc32_result = crc32((unsigned int)~0, data,
+ sizeof(struct octeon_firmware_file_header) -
+ sizeof(u32)) ^ ~0U;
if (crc32_result != be32_to_cpu(h->crc32)) {
dev_err(&oct->pci_dev->dev, "Firmware CRC mismatch (0x%08x != 0x%08x).\n",
crc32_result, be32_to_cpu(h->crc32));
return -EINVAL;
}
- if (memcmp(LIQUIDIO_VERSION, h->version, strlen(LIQUIDIO_VERSION))) {
- dev_err(&oct->pci_dev->dev, "Unmatched firmware version. Expected %s, got %s.\n",
- LIQUIDIO_VERSION, h->version);
+ if (strncmp(LIQUIDIO_PACKAGE, h->version, strlen(LIQUIDIO_PACKAGE))) {
+ dev_err(&oct->pci_dev->dev, "Unmatched firmware package type. Expected %s, got %s.\n",
+ LIQUIDIO_PACKAGE, h->version);
+ return -EINVAL;
+ }
+
+ base = h->version + strlen(LIQUIDIO_PACKAGE);
+ ret = memcmp(LIQUIDIO_BASE_VERSION, base, base_len);
+ if (ret) {
+ dev_err(&oct->pci_dev->dev, "Unmatched firmware version. Expected %s.x, got %s.\n",
+ LIQUIDIO_BASE_VERSION, base);
return -EINVAL;
}
@@ -602,60 +602,58 @@ int octeon_download_firmware(struct octeon_device *oct, const u8 *data,
snprintf(oct->fw_info.liquidio_firmware_version, 32, "LIQUIDIO: %s",
h->version);
- buffer = kmalloc(size, GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
-
- memcpy(buffer, data, size);
-
- p = buffer + sizeof(struct octeon_firmware_file_header);
+ data += sizeof(struct octeon_firmware_file_header);
+ dev_info(&oct->pci_dev->dev, "%s: Loading %d images\n", __func__,
+ be32_to_cpu(h->num_images));
/* load all images */
for (i = 0; i < be32_to_cpu(h->num_images); i++) {
load_addr = be64_to_cpu(h->desc[i].addr);
image_len = be32_to_cpu(h->desc[i].len);
- /* validate the image */
- crc32_result = crc32(~0, p, image_len) ^ ~0U;
- if (crc32_result != be32_to_cpu(h->desc[i].crc32)) {
- dev_err(&oct->pci_dev->dev,
- "Firmware CRC mismatch in image %d (0x%08x != 0x%08x).\n",
- i, crc32_result,
- be32_to_cpu(h->desc[i].crc32));
- ret = -EINVAL;
- goto done_downloading;
- }
+ dev_info(&oct->pci_dev->dev, "Loading firmware %d at %llx\n",
+ image_len, load_addr);
- /* download the image */
- octeon_pci_write_core_mem(oct, load_addr, p, image_len);
+ /* Write in 4MB chunks */
+ rem = image_len;
- p += image_len;
- dev_dbg(&oct->pci_dev->dev,
- "Downloaded image %d (%d bytes) to address 0x%016llx\n",
- i, image_len, load_addr);
+ while (rem) {
+ if (rem < (4 * 1024 * 1024))
+ size = rem;
+ else
+ size = 4 * 1024 * 1024;
+
+ memcpy(p, data, size);
+
+ /* download the image */
+ octeon_pci_write_core_mem(oct, load_addr, p, (u32)size);
+
+ data += size;
+ rem -= (u32)size;
+ load_addr += size;
+ }
}
+ dev_info(&oct->pci_dev->dev, "Writing boot command: %s\n",
+ h->bootcmd);
/* Invoke the bootcmd */
ret = octeon_console_send_cmd(oct, h->bootcmd, 50);
-done_downloading:
- kfree(buffer);
-
- return ret;
+ return 0;
}
void octeon_free_device_mem(struct octeon_device *oct)
{
- u32 i;
+ int i;
- for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) {
- /* could check mask as well */
- vfree(oct->droq[i]);
+ for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
+ if (oct->io_qmask.oq & (1ULL << i))
+ vfree(oct->droq[i]);
}
- for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
- /* could check mask as well */
- vfree(oct->instr_queue[i]);
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
+ if (oct->io_qmask.iq & (1ULL << i))
+ vfree(oct->instr_queue[i]);
}
i = oct->octeon_id;
@@ -737,55 +735,61 @@ struct octeon_device *octeon_allocate_device(u32 pci_id,
octeon_device[oct_idx] = oct;
oct->octeon_id = oct_idx;
- snprintf((oct->device_name), sizeof(oct->device_name),
+ snprintf(oct->device_name, sizeof(oct->device_name),
"LiquidIO%d", (oct->octeon_id));
return oct;
}
+/* this function is only for setting up the first queue */
int octeon_setup_instr_queues(struct octeon_device *oct)
{
- u32 i, num_iqs = 0;
u32 num_descs = 0;
+ u32 iq_no = 0;
+ union oct_txpciq txpciq;
+ int numa_node = cpu_to_node(iq_no % num_online_cpus());
/* this causes queue 0 to be default queue */
- if (OCTEON_CN6XXX(oct)) {
- num_iqs = 1;
+ if (OCTEON_CN6XXX(oct))
num_descs =
CFG_GET_NUM_DEF_TX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
- }
oct->num_iqs = 0;
- for (i = 0; i < num_iqs; i++) {
- oct->instr_queue[i] =
+ oct->instr_queue[0] = vmalloc_node(sizeof(*oct->instr_queue[0]),
+ numa_node);
+ if (!oct->instr_queue[0])
+ oct->instr_queue[0] =
vmalloc(sizeof(struct octeon_instr_queue));
- if (!oct->instr_queue[i])
- return 1;
-
- memset(oct->instr_queue[i], 0,
- sizeof(struct octeon_instr_queue));
-
- oct->instr_queue[i]->app_ctx = (void *)(size_t)i;
- if (octeon_init_instr_queue(oct, i, num_descs))
- return 1;
-
- oct->num_iqs++;
+ if (!oct->instr_queue[0])
+ return 1;
+ memset(oct->instr_queue[0], 0, sizeof(struct octeon_instr_queue));
+ oct->instr_queue[0]->q_index = 0;
+ oct->instr_queue[0]->app_ctx = (void *)(size_t)0;
+ oct->instr_queue[0]->ifidx = 0;
+ txpciq.u64 = 0;
+ txpciq.s.q_no = iq_no;
+ txpciq.s.use_qpg = 0;
+ txpciq.s.qpg = 0;
+ if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
+ /* prevent memory leak */
+ vfree(oct->instr_queue[0]);
+ return 1;
}
+ oct->num_iqs++;
return 0;
}
int octeon_setup_output_queues(struct octeon_device *oct)
{
- u32 i, num_oqs = 0;
u32 num_descs = 0;
u32 desc_size = 0;
+ u32 oq_no = 0;
+ int numa_node = cpu_to_node(oq_no % num_online_cpus());
/* this causes queue 0 to be default queue */
if (OCTEON_CN6XXX(oct)) {
- /* CFG_GET_OQ_MAX_BASE_Q(CHIP_FIELD(oct, cn6xxx, conf)); */
- num_oqs = 1;
num_descs =
CFG_GET_NUM_DEF_RX_DESCS(CHIP_FIELD(oct, cn6xxx, conf));
desc_size =
@@ -793,19 +797,15 @@ int octeon_setup_output_queues(struct octeon_device *oct)
}
oct->num_oqs = 0;
+ oct->droq[0] = vmalloc_node(sizeof(*oct->droq[0]), numa_node);
+ if (!oct->droq[0])
+ oct->droq[0] = vmalloc(sizeof(*oct->droq[0]));
+ if (!oct->droq[0])
+ return 1;
- for (i = 0; i < num_oqs; i++) {
- oct->droq[i] = vmalloc(sizeof(*oct->droq[i]));
- if (!oct->droq[i])
- return 1;
-
- memset(oct->droq[i], 0, sizeof(struct octeon_droq));
-
- if (octeon_init_droq(oct, i, num_descs, desc_size, NULL))
- return 1;
-
- oct->num_oqs++;
- }
+ if (octeon_init_droq(oct, oq_no, num_descs, desc_size, NULL))
+ return 1;
+ oct->num_oqs++;
return 0;
}
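
Both setup paths above derive the preferred NUMA node from the queue number before allocating. If this pattern spreads to more call sites it could be factored into a small helper; the following is illustrative only, not part of the patch:

	/* Illustrative helper: queue n prefers memory on the node of
	 * CPU (n % num_online_cpus()), matching the open-coded pattern above. */
	static int lio_queue_numa_node(u32 q_no)
	{
		return cpu_to_node(q_no % num_online_cpus());
	}
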
@@ -1007,79 +1007,6 @@ octeon_register_dispatch_fn(struct octeon_device *oct,
return 0;
}
-/* octeon_unregister_dispatch_fn
- * Parameters:
- * oct - octeon device
- * opcode - driver should unregister the function for this opcode
- * subcode - driver should unregister the function for this subcode
- * Description:
- * Unregister the function set for this opcode+subcode.
- * Returns:
- * Success: 0
- * Failure: 1
- * Locks:
- * No locks are held.
- */
-int
-octeon_unregister_dispatch_fn(struct octeon_device *oct, u16 opcode,
- u16 subcode)
-{
- int retval = 0;
- u32 idx;
- struct list_head *dispatch, *dfree = NULL, *tmp2;
- u16 combined_opcode = OPCODE_SUBCODE(opcode, subcode);
-
- idx = combined_opcode & OCTEON_OPCODE_MASK;
-
- spin_lock_bh(&oct->dispatch.lock);
-
- if (oct->dispatch.count == 0) {
- spin_unlock_bh(&oct->dispatch.lock);
- dev_err(&oct->pci_dev->dev,
- "No dispatch functions registered for this device\n");
- return 1;
- }
-
- if (oct->dispatch.dlist[idx].opcode == combined_opcode) {
- dispatch = &oct->dispatch.dlist[idx].list;
- if (dispatch->next != dispatch) {
- dispatch = dispatch->next;
- oct->dispatch.dlist[idx].opcode =
- ((struct octeon_dispatch *)dispatch)->opcode;
- oct->dispatch.dlist[idx].dispatch_fn =
- ((struct octeon_dispatch *)
- dispatch)->dispatch_fn;
- oct->dispatch.dlist[idx].arg =
- ((struct octeon_dispatch *)dispatch)->arg;
- list_del(dispatch);
- dfree = dispatch;
- } else {
- oct->dispatch.dlist[idx].opcode = 0;
- oct->dispatch.dlist[idx].dispatch_fn = NULL;
- oct->dispatch.dlist[idx].arg = NULL;
- }
- } else {
- retval = 1;
- list_for_each_safe(dispatch, tmp2,
- &(oct->dispatch.dlist[idx].
- list)) {
- if (((struct octeon_dispatch *)dispatch)->opcode ==
- combined_opcode) {
- list_del(dispatch);
- dfree = dispatch;
- retval = 0;
- }
- }
- }
-
- if (!retval)
- oct->dispatch.count--;
-
- spin_unlock_bh(&oct->dispatch.lock);
- vfree(dfree);
- return retval;
-}
-
int octeon_core_drv_init(struct octeon_recv_info *recv_info, void *buf)
{
u32 i;
@@ -1154,8 +1081,8 @@ core_drv_init_err:
int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no)
{
- if (oct && (q_no < MAX_OCTEON_INSTR_QUEUES) &&
- (oct->io_qmask.iq & (1UL << q_no)))
+ if (oct && (q_no < MAX_OCTEON_INSTR_QUEUES(oct)) &&
+ (oct->io_qmask.iq & (1ULL << q_no)))
return oct->instr_queue[q_no]->max_count;
return -1;
@@ -1163,8 +1090,8 @@ int octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no)
int octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no)
{
- if (oct && (q_no < MAX_OCTEON_OUTPUT_QUEUES) &&
- (oct->io_qmask.oq & (1UL << q_no)))
+ if (oct && (q_no < MAX_OCTEON_OUTPUT_QUEUES(oct)) &&
+ (oct->io_qmask.oq & (1ULL << q_no)))
return oct->droq[q_no]->max_count;
return -1;
}
@@ -1255,10 +1182,10 @@ void lio_pci_writeq(struct octeon_device *oct,
int octeon_mem_access_ok(struct octeon_device *oct)
{
u64 access_okay = 0;
+ u64 lmc0_reset_ctl;
/* Check to make sure a DDR interface is enabled */
- u64 lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL);
-
+ lmc0_reset_ctl = lio_pci_readq(oct, CN6XXX_LMC0_RESET_CTL);
access_okay = (lmc0_reset_ctl & CN6XXX_LMC0_RESET_CTL_DDR3RST_MASK);
return access_okay ? 0 : 1;
@@ -1272,9 +1199,6 @@ int octeon_wait_for_ddr_init(struct octeon_device *oct, u32 *timeout)
if (!timeout)
return ret;
- while (*timeout == 0)
- schedule_timeout_uninterruptible(HZ / 10);
-
for (ms = 0; (ret != 0) && ((*timeout == 0) || (ms <= *timeout));
ms += HZ / 10) {
ret = octeon_mem_access_ok(oct);
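
The rewritten download loop in octeon_download_firmware() streams each image through a fixed 4 MB bounce buffer instead of duplicating the entire firmware file with kmalloc(). A minimal sketch of just that chunking loop, with write_core_mem() standing in for octeon_pci_write_core_mem():

	#include <stdint.h>
	#include <string.h>

	#define CHUNK_SZ (4 * 1024 * 1024)

	static uint8_t bounce[CHUNK_SZ];	/* plays the role of fbuf[] */

	/* Assumed stand-in for octeon_pci_write_core_mem(). */
	void write_core_mem(uint64_t load_addr, const uint8_t *buf, uint32_t len);

	static void stream_image(const uint8_t *data, uint64_t load_addr,
				 uint32_t image_len)
	{
		while (image_len) {
			uint32_t n = (image_len < CHUNK_SZ) ? image_len : CHUNK_SZ;

			memcpy(bounce, data, n);	/* stage one chunk */
			write_core_mem(load_addr, bounce, n);

			data += n;
			load_addr += n;
			image_len -= n;
		}
	}
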
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.h b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
index 36e1f85df8c4..01edfb404346 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.h
@@ -152,9 +152,9 @@ struct octeon_mmio {
#define MAX_OCTEON_MAPS 32
struct octeon_io_enable {
- u32 iq;
- u32 oq;
- u32 iq64B;
+ u64 iq;
+ u64 oq;
+ u64 iq64B;
};
struct octeon_reg_list {
@@ -204,8 +204,7 @@ struct octeon_fn_list {
void (*bar1_idx_setup)(struct octeon_device *, u64, u32, int);
void (*bar1_idx_write)(struct octeon_device *, u32, u32);
u32 (*bar1_idx_read)(struct octeon_device *, u32);
- u32 (*update_iq_read_idx)(struct octeon_device *,
- struct octeon_instr_queue *);
+ u32 (*update_iq_read_idx)(struct octeon_instr_queue *);
void (*enable_oq_pkt_time_intr)(struct octeon_device *, u32);
void (*disable_oq_pkt_time_intr)(struct octeon_device *, u32);
@@ -222,7 +221,7 @@ struct octeon_fn_list {
/* Structure for named memory blocks
* Number of descriptors
- * available can be changed without affecting compatiblity,
+ * available can be changed without affecting compatibility,
* but name length changes require a bump in the bootmem
* descriptor version
* Note: This structure must be naturally 64 bit aligned, as a single
@@ -255,7 +254,7 @@ struct oct_fw_info {
struct cavium_wk {
struct delayed_work work;
void *ctxptr;
- size_t ctxul;
+ u64 ctxul;
};
struct cavium_wq {
@@ -267,6 +266,8 @@ struct octdev_props {
/* Each interface in the Octeon device has a network
* device pointer (used for OS specific calls).
*/
+ int napi_enabled;
+ int gmxport;
struct net_device *netdev;
};
@@ -324,7 +325,8 @@ struct octeon_device {
struct octeon_sc_buffer_pool sc_buf_pool;
/** The input instruction queues */
- struct octeon_instr_queue *instr_queue[MAX_OCTEON_INSTR_QUEUES];
+ struct octeon_instr_queue *instr_queue
+ [MAX_POSSIBLE_OCTEON_INSTR_QUEUES];
/** The doubly-linked list of instruction response */
struct octeon_response_list response_list[MAX_RESPONSE_LISTS];
@@ -332,7 +334,7 @@ struct octeon_device {
u32 num_oqs;
/** The DROQ output queues */
- struct octeon_droq *droq[MAX_OCTEON_OUTPUT_QUEUES];
+ struct octeon_droq *droq[MAX_POSSIBLE_OCTEON_OUTPUT_QUEUES];
struct octeon_io_enable io_qmask;
@@ -381,15 +383,29 @@ struct octeon_device {
struct cavium_wq dma_comp_wq;
- struct cavium_wq check_db_wq[MAX_OCTEON_INSTR_QUEUES];
+ /** Lock for dma response list */
+ spinlock_t cmd_resp_wqlock;
+ u32 cmd_resp_state;
+
+ struct cavium_wq check_db_wq[MAX_POSSIBLE_OCTEON_INSTR_QUEUES];
struct cavium_wk nic_poll_work;
struct cavium_wk console_poll_work[MAX_OCTEON_MAPS];
void *priv;
+
+ int rx_pause;
+ int tx_pause;
+
+ struct oct_link_stats link_stats; /* statistics from firmware */
+
+ /* private flags to control driver-specific features through ethtool */
+ u32 priv_flags;
};
+#define OCT_DRV_ONLINE 1
+#define OCT_DRV_OFFLINE 2
#define OCTEON_CN6XXX(oct) ((oct->chip_id == OCTEON_CN66XX) || \
(oct->chip_id == OCTEON_CN68XX))
#define CHIP_FIELD(oct, TYPE, field) \
@@ -569,8 +585,7 @@ int octeon_add_console(struct octeon_device *oct, u32 console_num);
int octeon_console_write(struct octeon_device *oct, u32 console_num,
char *buffer, u32 write_request_size, u32 flags);
int octeon_console_write_avail(struct octeon_device *oct, u32 console_num);
-int octeon_console_read(struct octeon_device *oct, u32 console_num,
- char *buffer, u32 buf_size, u32 flags);
+
int octeon_console_read_avail(struct octeon_device *oct, u32 console_num);
/** Removes all attached consoles. */
@@ -646,4 +661,17 @@ void *oct_get_config_info(struct octeon_device *oct, u16 card_type);
*/
struct octeon_config *octeon_get_conf(struct octeon_device *oct);
+/* LiquidIO driver private flags */
+enum {
+ OCT_PRIV_FLAG_TX_BYTES = 0, /* Tx interrupts by pending byte count */
+};
+
+static inline void lio_set_priv_flag(struct octeon_device *octdev, u32 flag,
+ u32 val)
+{
+ if (val)
+ octdev->priv_flags |= (0x1 << flag);
+ else
+ octdev->priv_flags &= ~(0x1 << flag);
+}
#endif
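
lio_set_priv_flag() stores one bit per flag in priv_flags; the matching read accessor is the natural companion and is sketched here under the same bit layout (it is not part of this hunk):

	/* Sketch: read back a flag written by lio_set_priv_flag(). */
	static inline u32 lio_get_priv_flag(struct octeon_device *octdev, u32 flag)
	{
		return (octdev->priv_flags >> flag) & 0x1;
	}
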
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index 174072b3740b..e0afe4c1fd01 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -19,30 +19,18 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
-#include <linux/version.h>
-#include <linux/types.h>
-#include <linux/list.h>
#include <linux/pci.h>
-#include <linux/kthread.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
-#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
-#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
-#include "cn68xx_regs.h"
-#include "cn68xx_device.h"
-#include "liquidio_image.h"
-#include "octeon_mem_ops.h"
-
-/* #define CAVIUM_ONLY_PERF_MODE */
#define CVM_MIN(d1, d2) (((d1) < (d2)) ? (d1) : (d2))
#define CVM_MAX(d1, d2) (((d1) > (d2)) ? (d1) : (d2))
@@ -104,8 +92,12 @@ static inline void *octeon_get_dispatch_arg(struct octeon_device *octeon_dev,
return fn_arg;
}
-u32 octeon_droq_check_hw_for_pkts(struct octeon_device *oct,
- struct octeon_droq *droq)
+/** Check for packets on Droq. This function should be called with
+ * lock held.
+ * @param droq - Droq on which count is checked.
+ * @return Returns packet count.
+ */
+u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq)
{
u32 pkt_count = 0;
@@ -151,22 +143,26 @@ octeon_droq_destroy_ring_buffers(struct octeon_device *oct,
struct octeon_droq *droq)
{
u32 i;
+ struct octeon_skb_page_info *pg_info;
for (i = 0; i < droq->max_count; i++) {
- if (droq->recv_buf_list[i].buffer) {
- if (droq->desc_ring) {
- lio_unmap_ring_info(oct->pci_dev,
- (u64)droq->
- desc_ring[i].info_ptr,
- OCT_DROQ_INFO_SIZE);
- lio_unmap_ring(oct->pci_dev,
- (u64)droq->desc_ring[i].
- buffer_ptr,
- droq->buffer_size);
- }
- recv_buffer_free(droq->recv_buf_list[i].buffer);
- droq->recv_buf_list[i].buffer = NULL;
- }
+ pg_info = &droq->recv_buf_list[i].pg_info;
+
+ if (pg_info->dma)
+ lio_unmap_ring(oct->pci_dev,
+ (u64)pg_info->dma);
+ pg_info->dma = 0;
+
+ if (pg_info->page)
+ recv_buffer_destroy(droq->recv_buf_list[i].buffer,
+ pg_info);
+
+ if (droq->desc_ring && droq->desc_ring[i].info_ptr)
+ lio_unmap_ring_info(oct->pci_dev,
+ (u64)droq->
+ desc_ring[i].info_ptr,
+ OCT_DROQ_INFO_SIZE);
+ droq->recv_buf_list[i].buffer = NULL;
}
octeon_droq_reset_indices(droq);
@@ -181,25 +177,23 @@ octeon_droq_setup_ring_buffers(struct octeon_device *oct,
struct octeon_droq_desc *desc_ring = droq->desc_ring;
for (i = 0; i < droq->max_count; i++) {
- buf = recv_buffer_alloc(oct, droq->q_no, droq->buffer_size);
+ buf = recv_buffer_alloc(oct, &droq->recv_buf_list[i].pg_info);
if (!buf) {
dev_err(&oct->pci_dev->dev, "%s buffer alloc failed\n",
__func__);
+ droq->stats.rx_alloc_failure++;
return -ENOMEM;
}
droq->recv_buf_list[i].buffer = buf;
droq->recv_buf_list[i].data = get_rbd(buf);
-
droq->info_list[i].length = 0;
/* map ring buffers into memory */
desc_ring[i].info_ptr = lio_map_ring_info(droq, i);
desc_ring[i].buffer_ptr =
- lio_map_ring(oct->pci_dev,
- droq->recv_buf_list[i].buffer,
- droq->buffer_size);
+ lio_map_ring(droq->recv_buf_list[i].buffer);
}
octeon_droq_reset_indices(droq);
@@ -242,6 +236,8 @@ int octeon_init_droq(struct octeon_device *oct,
struct octeon_droq *droq;
u32 desc_ring_size = 0, c_num_descs = 0, c_buf_size = 0;
u32 c_pkts_per_intr = 0, c_refill_threshold = 0;
+ int orig_node = dev_to_node(&oct->pci_dev->dev);
+ int numa_node = cpu_to_node(q_no % num_online_cpus());
dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);
@@ -261,15 +257,23 @@ int octeon_init_droq(struct octeon_device *oct,
struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);
c_pkts_per_intr = (u32)CFG_GET_OQ_PKTS_PER_INTR(conf6x);
- c_refill_threshold = (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x);
+ c_refill_threshold =
+ (u32)CFG_GET_OQ_REFILL_THRESHOLD(conf6x);
+ } else {
+ return 1;
}
droq->max_count = c_num_descs;
droq->buffer_size = c_buf_size;
desc_ring_size = droq->max_count * OCT_DROQ_DESC_SIZE;
+ set_dev_node(&oct->pci_dev->dev, numa_node);
droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
(dma_addr_t *)&droq->desc_ring_dma);
+ set_dev_node(&oct->pci_dev->dev, orig_node);
+ if (!droq->desc_ring)
+ droq->desc_ring = lio_dma_alloc(oct, desc_ring_size,
+ (dma_addr_t *)&droq->desc_ring_dma);
if (!droq->desc_ring) {
dev_err(&oct->pci_dev->dev,
@@ -283,12 +287,11 @@ int octeon_init_droq(struct octeon_device *oct,
droq->max_count);
droq->info_list =
- cnnic_alloc_aligned_dma(oct->pci_dev,
- (droq->max_count * OCT_DROQ_INFO_SIZE),
- &droq->info_alloc_size,
- &droq->info_base_addr,
- &droq->info_list_dma);
-
+ cnnic_numa_alloc_aligned_dma((droq->max_count *
+ OCT_DROQ_INFO_SIZE),
+ &droq->info_alloc_size,
+ &droq->info_base_addr,
+ numa_node);
if (!droq->info_list) {
dev_err(&oct->pci_dev->dev, "Cannot allocate memory for info list.\n");
lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),
@@ -297,7 +300,12 @@ int octeon_init_droq(struct octeon_device *oct,
}
droq->recv_buf_list = (struct octeon_recv_buffer *)
- vmalloc(droq->max_count *
+ vmalloc_node(droq->max_count *
+ OCT_DROQ_RECVBUF_SIZE,
+ numa_node);
+ if (!droq->recv_buf_list)
+ droq->recv_buf_list = (struct octeon_recv_buffer *)
+ vmalloc(droq->max_count *
OCT_DROQ_RECVBUF_SIZE);
if (!droq->recv_buf_list) {
dev_err(&oct->pci_dev->dev, "Output queue recv buf list alloc failed\n");
@@ -320,7 +328,7 @@ int octeon_init_droq(struct octeon_device *oct,
/* For 56xx Pass1, this function won't be called, so no checks. */
oct->fn_list.setup_oq_regs(oct, q_no);
- oct->io_qmask.oq |= (1 << q_no);
+ oct->io_qmask.oq |= (1ULL << q_no);
return 0;
@@ -358,6 +366,7 @@ static inline struct octeon_recv_info *octeon_create_recv_info(
struct octeon_recv_pkt *recv_pkt;
struct octeon_recv_info *recv_info;
u32 i, bytes_left;
+ struct octeon_skb_page_info *pg_info;
info = &droq->info_list[idx];
@@ -375,9 +384,14 @@ static inline struct octeon_recv_info *octeon_create_recv_info(
bytes_left = (u32)info->length;
while (buf_cnt) {
- lio_unmap_ring(octeon_dev->pci_dev,
- (u64)droq->desc_ring[idx].buffer_ptr,
- droq->buffer_size);
+ {
+ pg_info = &droq->recv_buf_list[idx].pg_info;
+
+ lio_unmap_ring(octeon_dev->pci_dev,
+ (u64)pg_info->dma);
+ pg_info->page = NULL;
+ pg_info->dma = 0;
+ }
recv_pkt->buffer_size[i] =
(bytes_left >=
@@ -449,6 +463,7 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
void *buf = NULL;
u8 *data;
u32 desc_refilled = 0;
+ struct octeon_skb_page_info *pg_info;
desc_ring = droq->desc_ring;
@@ -458,13 +473,22 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
* the buffer, else allocate.
*/
if (!droq->recv_buf_list[droq->refill_idx].buffer) {
- buf = recv_buffer_alloc(octeon_dev, droq->q_no,
- droq->buffer_size);
+ pg_info =
+ &droq->recv_buf_list[droq->refill_idx].pg_info;
+ /* Either recycle the existing pages or go for
+ * new page alloc
+ */
+ if (pg_info->page)
+ buf = recv_buffer_reuse(octeon_dev, pg_info);
+ else
+ buf = recv_buffer_alloc(octeon_dev, pg_info);
/* If a buffer could not be allocated, no point in
* continuing
*/
- if (!buf)
+ if (!buf) {
+ droq->stats.rx_alloc_failure++;
break;
+ }
droq->recv_buf_list[droq->refill_idx].buffer =
buf;
data = get_rbd(buf);
@@ -476,11 +500,8 @@ octeon_droq_refill(struct octeon_device *octeon_dev, struct octeon_droq *droq)
droq->recv_buf_list[droq->refill_idx].data = data;
desc_ring[droq->refill_idx].buffer_ptr =
- lio_map_ring(octeon_dev->pci_dev,
- droq->recv_buf_list[droq->
- refill_idx].buffer,
- droq->buffer_size);
-
+ lio_map_ring(droq->recv_buf_list[droq->
+ refill_idx].buffer);
/* Reset any previous values in the length field. */
droq->info_list[droq->refill_idx].length = 0;
@@ -539,7 +560,9 @@ octeon_droq_dispatch_pkt(struct octeon_device *oct,
droq->stats.dropped_nomem++;
}
} else {
- dev_err(&oct->pci_dev->dev, "DROQ: No dispatch function\n");
+ dev_err(&oct->pci_dev->dev, "DROQ: No dispatch function (opcode %u/%u)\n",
+ (unsigned int)rh->r.opcode,
+ (unsigned int)rh->r.subcode);
droq->stats.dropped_nodispatch++;
} /* else (dispatch_fn ... */
@@ -586,6 +609,8 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
for (pkt = 0; pkt < pkt_count; pkt++) {
u32 pkt_len = 0;
struct sk_buff *nicbuf = NULL;
+ struct octeon_skb_page_info *pg_info;
+ void *buf;
info = &droq->info_list[droq->read_idx];
octeon_swap_8B_data((u64 *)info, 2);
@@ -605,7 +630,6 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
rh = &info->rh;
total_len += (u32)info->length;
-
if (OPCODE_SLOW_PATH(rh)) {
u32 buf_cnt;
@@ -614,50 +638,45 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
droq->refill_count += buf_cnt;
} else {
if (info->length <= droq->buffer_size) {
- lio_unmap_ring(oct->pci_dev,
- (u64)droq->desc_ring[
- droq->read_idx].buffer_ptr,
- droq->buffer_size);
pkt_len = (u32)info->length;
nicbuf = droq->recv_buf_list[
droq->read_idx].buffer;
+ pg_info = &droq->recv_buf_list[
+ droq->read_idx].pg_info;
+ if (recv_buffer_recycle(oct, pg_info))
+ pg_info->page = NULL;
droq->recv_buf_list[droq->read_idx].buffer =
NULL;
+
INCR_INDEX_BY1(droq->read_idx, droq->max_count);
- skb_put(nicbuf, pkt_len);
droq->refill_count++;
} else {
- nicbuf = octeon_fast_packet_alloc(oct, droq,
- droq->q_no,
- (u32)
+ nicbuf = octeon_fast_packet_alloc((u32)
info->length);
pkt_len = 0;
/* nicbuf allocation can fail. We'll handle it
* inside the loop.
*/
while (pkt_len < info->length) {
- int cpy_len;
+ int cpy_len, idx = droq->read_idx;
- cpy_len = ((pkt_len +
- droq->buffer_size) >
- info->length) ?
+ cpy_len = ((pkt_len + droq->buffer_size)
+ > info->length) ?
((u32)info->length - pkt_len) :
droq->buffer_size;
if (nicbuf) {
- lio_unmap_ring(oct->pci_dev,
- (u64)
- droq->desc_ring
- [droq->read_idx].
- buffer_ptr,
- droq->
- buffer_size);
octeon_fast_packet_next(droq,
nicbuf,
cpy_len,
- droq->
- read_idx
- );
+ idx);
+ buf = droq->recv_buf_list[idx].
+ buffer;
+ recv_buffer_fast_free(buf);
+ droq->recv_buf_list[idx].buffer
+ = NULL;
+ } else {
+ droq->stats.rx_alloc_failure++;
}
pkt_len += cpy_len;
@@ -668,12 +687,14 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
}
if (nicbuf) {
- if (droq->ops.fptr)
+ if (droq->ops.fptr) {
droq->ops.fptr(oct->octeon_id,
- nicbuf, pkt_len,
- rh, &droq->napi);
- else
+ nicbuf, pkt_len,
+ rh, &droq->napi,
+ droq->ops.farg);
+ } else {
recv_buffer_free(nicbuf);
+ }
}
}
@@ -681,16 +702,16 @@ octeon_droq_fast_process_packets(struct octeon_device *oct,
int desc_refilled = octeon_droq_refill(oct, droq);
/* Flush the droq descriptor data to memory to be sure
- * that when we update the credits the data in memory
- * is accurate.
- */
+ * that when we update the credits the data in memory
+ * is accurate.
+ */
wmb();
writel((desc_refilled), droq->pkts_credit_reg);
/* make sure mmio write completes */
mmiowb();
}
- } /* for ( each packet )... */
+ } /* for (each packet)... */
/* Increment refill_count by the number of buffers processed. */
droq->stats.pkts_received += pkt;
@@ -721,7 +742,7 @@ octeon_droq_process_packets(struct octeon_device *oct,
if (pkt_count > budget)
pkt_count = budget;
- /* Grab the lock */
+ /* Grab the droq lock */
spin_lock(&droq->lock);
pkts_processed = octeon_droq_fast_process_packets(oct, droq, pkt_count);
@@ -783,7 +804,7 @@ octeon_droq_process_poll_pkts(struct octeon_device *oct,
total_pkts_processed += pkts_processed;
- octeon_droq_check_hw_for_pkts(oct, droq);
+ octeon_droq_check_hw_for_pkts(droq);
}
spin_unlock(&droq->lock);
@@ -807,18 +828,6 @@ octeon_process_droq_poll_cmd(struct octeon_device *oct, u32 q_no, int cmd,
u32 arg)
{
struct octeon_droq *droq;
- struct octeon_config *oct_cfg = NULL;
-
- oct_cfg = octeon_get_conf(oct);
-
- if (!oct_cfg)
- return -EINVAL;
-
- if (q_no >= CFG_GET_OQ_MAX_Q(oct_cfg)) {
- dev_err(&oct->pci_dev->dev, "%s: droq id (%d) exceeds MAX (%d)\n",
- __func__, q_no, (oct->num_oqs - 1));
- return -EINVAL;
- }
droq = oct->droq[q_no];
@@ -937,6 +946,7 @@ int octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no)
spin_lock_irqsave(&droq->lock, flags);
droq->ops.fptr = NULL;
+ droq->ops.farg = NULL;
droq->ops.drop_on_max = 0;
spin_unlock_irqrestore(&droq->lock, flags);
@@ -949,6 +959,7 @@ int octeon_create_droq(struct octeon_device *oct,
u32 desc_size, void *app_ctx)
{
struct octeon_droq *droq;
+ int numa_node = cpu_to_node(q_no % num_online_cpus());
if (oct->droq[q_no]) {
dev_dbg(&oct->pci_dev->dev, "Droq already in use. Cannot create droq %d again\n",
@@ -957,7 +968,9 @@ int octeon_create_droq(struct octeon_device *oct,
}
/* Allocate the DS for the new droq. */
- droq = vmalloc(sizeof(*droq));
+ droq = vmalloc_node(sizeof(*droq), numa_node);
+ if (!droq)
+ droq = vmalloc(sizeof(*droq));
if (!droq)
goto create_droq_fail;
memset(droq, 0, sizeof(struct octeon_droq));
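
octeon_create_droq() and the ring allocations above all repeat the same node-first, any-node-fallback vmalloc pattern followed by a memset(). A generic sketch of that pattern (the driver open-codes it at each site; vzalloc*() already zeroes the memory):

	#include <linux/vmalloc.h>

	/* Sketch: NUMA-preferred virtually contiguous allocation with a
	 * fallback to any node, zeroed on success. */
	static void *lio_vzalloc_node(unsigned long size, int node)
	{
		void *p = vzalloc_node(size, node);

		if (!p)
			p = vzalloc(size);	/* any node beats failing outright */
		return p;
	}
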
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
index 7940ccee12d9..5a6fb9113bbd 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.h
@@ -65,6 +65,17 @@ struct octeon_droq_info {
#define OCT_DROQ_INFO_SIZE (sizeof(struct octeon_droq_info))
+struct octeon_skb_page_info {
+ /* DMA address for the page */
+ dma_addr_t dma;
+
+ /* Page for the rx dma */
+ struct page *page;
+
+ /** which offset into page */
+ unsigned int page_offset;
+};
+
/** Pointer to data buffer.
* Driver keeps a pointer to the data buffer that it made available to
* the Octeon device. Since the descriptor ring keeps physical (bus)
@@ -77,6 +88,9 @@ struct octeon_recv_buffer {
/** Data in the packet buffer. */
u8 *data;
+
+ /** pg_info */
+ struct octeon_skb_page_info pg_info;
};
#define OCT_DROQ_RECVBUF_SIZE (sizeof(struct octeon_recv_buffer))
@@ -106,6 +120,13 @@ struct oct_droq_stats {
/** Num of Packets dropped due to receive path failures. */
u64 rx_dropped;
+
+ /** Num of vxlan packets received; */
+ u64 rx_vxlan;
+
+ /** Num of failures of recv_buffer_alloc() */
+ u64 rx_alloc_failure;
+
};
#define POLL_EVENT_INTR_ARRIVED 1
@@ -213,7 +234,8 @@ struct octeon_droq_ops {
* data in the buffer. The receive header gives the port
* number to the caller. Function pointer is set by caller.
*/
- void (*fptr)(u32, void *, u32, union octeon_rh *, void *);
+ void (*fptr)(u32, void *, u32, union octeon_rh *, void *, void *);
+ void *farg;
/* This function will be called by the driver for all NAPI related
* events. The first param is the octeon id. The second param is the
@@ -394,24 +416,9 @@ int octeon_register_dispatch_fn(struct octeon_device *oct,
u16 subcode,
octeon_dispatch_fn_t fn, void *fn_arg);
-/** Remove registration for an opcode/subcode. This will delete the mapping for
- * an opcode/subcode. The dispatch function will be unregistered and will no
- * longer be called if a packet with the opcode/subcode arrives in the driver
- * output queues.
- * @param oct - the octeon device to unregister from.
- * @param opcode - the opcode to be unregistered.
- * @param subcode - the subcode to be unregistered.
- *
- * @return Success: 0; Failure: 1
- */
-int octeon_unregister_dispatch_fn(struct octeon_device *oct,
- u16 opcode,
- u16 subcode);
-
void octeon_droq_print_stats(void);
-u32 octeon_droq_check_hw_for_pkts(struct octeon_device *oct,
- struct octeon_droq *droq);
+u32 octeon_droq_check_hw_for_pkts(struct octeon_droq *droq);
int octeon_create_droq(struct octeon_device *oct, u32 q_no,
u32 num_descs, u32 desc_size, void *app_ctx);
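
The fptr callback gains a sixth parameter, farg, so a consumer can carry per-interface context into its receive handler instead of deriving it from globals. A hedged sketch of the wiring (octeon_register_droq_ops() is assumed to keep its oct/q_no/ops signature; struct my_if_ctx is hypothetical):

	struct my_if_ctx {
		struct net_device *netdev;	/* hypothetical per-queue context */
	};

	static void my_rx_handler(u32 octeon_id, void *buf, u32 len,
				  union octeon_rh *rh, void *napi, void *arg)
	{
		struct my_if_ctx *ctx = arg;	/* delivered via ops.farg */

		/* ... hand the packet in buf to ctx->netdev ... */
	}

	static int wire_up_droq(struct octeon_device *oct, u32 q_no,
				struct my_if_ctx *ctx)
	{
		struct octeon_droq_ops ops;

		memset(&ops, 0, sizeof(ops));
		ops.fptr = my_rx_handler;
		ops.farg = ctx;			/* new field from this patch */
		return octeon_register_droq_ops(oct, q_no, &ops);
	}
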
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
index 592fe49b589d..ff4b1d6f007b 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
@@ -65,6 +65,11 @@ struct oct_iq_stats {
u64 tx_iq_busy;/**< Num of times this iq was found to be full. */
u64 tx_dropped;/**< Num of pkts dropped due to xmit path errors. */
u64 tx_tot_bytes;/**< Total count of bytes sent to network. */
+ u64 tx_gso; /* count of tso */
+ u64 tx_vxlan; /* tunnel */
+ u64 tx_dmamap_fail;
+ u64 tx_restart;
+ /*u64 tx_timeout_count;*/
};
#define OCT_IQ_STATS_SIZE (sizeof(struct oct_iq_stats))
@@ -75,18 +80,26 @@ struct oct_iq_stats {
* a Octeon device has one such structure to represent it.
*/
struct octeon_instr_queue {
+ struct octeon_device *oct_dev;
+
/** A spinlock to protect access to the input ring. */
spinlock_t lock;
+ /** A spinlock to protect while posting on the ring. */
+ spinlock_t post_lock;
+
+ /** A spinlock to protect access to the input ring.*/
+ spinlock_t iq_flush_running_lock;
+
/** Flag that indicates if the queue uses 64 byte commands. */
u32 iqcmd_64B:1;
- /** Queue Number. */
- u32 iq_no:5;
+ /** Queue info. */
+ union oct_txpciq txpciq;
u32 rsvd:17;
- /* Controls the periodic flushing of iq */
+ /* Controls whether extra flushing of IQ is done on Tx */
u32 do_auto_flush:1;
u32 status:8;
@@ -147,6 +160,13 @@ struct octeon_instr_queue {
/** Application context */
void *app_ctx;
+
+ /* network stack queue index */
+ int q_index;
+
+ /* os ifidx associated with this queue */
+ int ifidx;
+
};
/*---------------------- INSTRUCTION FORMAT ----------------------------*/
@@ -176,12 +196,12 @@ struct octeon_instr_32B {
/** 64-byte instruction format.
* Format of instruction for a 64-byte mode input queue.
*/
-struct octeon_instr_64B {
+struct octeon_instr2_64B {
/** Pointer where the input data is available. */
u64 dptr;
/** Instruction Header. */
- u64 ih;
+ u64 ih2;
/** Input Request Header. */
u64 irh;
@@ -198,14 +218,44 @@ struct octeon_instr_64B {
u64 rptr;
u64 reserved;
+};
+
+struct octeon_instr3_64B {
+ /** Pointer where the input data is available. */
+ u64 dptr;
+
+ /** Instruction Header. */
+ u64 ih3;
+
+ /** Instruction Header. */
+ u64 pki_ih3;
+
+ /** Input Request Header. */
+ u64 irh;
+ /** opcode/subcode specific parameters */
+ u64 ossp[2];
+
+ /** Return Data Parameters */
+ u64 rdp;
+
+ /** Pointer where the response for a RAW mode packet will be written
+ * by Octeon.
+ */
+ u64 rptr;
+
+};
+
+union octeon_instr_64B {
+ struct octeon_instr2_64B cmd2;
+ struct octeon_instr3_64B cmd3;
};
-#define OCT_64B_INSTR_SIZE (sizeof(struct octeon_instr_64B))
+#define OCT_64B_INSTR_SIZE (sizeof(union octeon_instr_64B))
/** The size of each buffer in soft command buffer pool
*/
-#define SOFT_COMMAND_BUFFER_SIZE 1024
+#define SOFT_COMMAND_BUFFER_SIZE 1536
struct octeon_soft_command {
/** Soft command buffer info. */
@@ -214,7 +264,8 @@ struct octeon_soft_command {
u32 size;
/** Command and return status */
- struct octeon_instr_64B cmd;
+ union octeon_instr_64B cmd;
+
#define COMPLETION_WORD_INIT 0xffffffffffffffffULL
u64 *status_word;
@@ -242,7 +293,7 @@ struct octeon_soft_command {
/** Maximum number of buffers to allocate into soft command buffer pool
*/
-#define MAX_SOFT_COMMAND_BUFFERS 16
+#define MAX_SOFT_COMMAND_BUFFERS 256
/** Head of a soft command buffer pool.
*/
@@ -268,14 +319,15 @@ void octeon_free_soft_command(struct octeon_device *oct,
/**
* octeon_init_instr_queue()
* @param octeon_dev - pointer to the octeon device structure.
- * @param iq_no - queue to be initialized (0 <= q_no <= 3).
+ * @param txpciq - queue to be initialized (0 <= q_no <= 3).
*
* Called at driver init time for each input queue. iq_conf has the
* configuration parameters for the queue.
*
* @return Success: 0 Failure: 1
*/
-int octeon_init_instr_queue(struct octeon_device *octeon_dev, u32 iq_no,
+int octeon_init_instr_queue(struct octeon_device *octeon_dev,
+ union oct_txpciq txpciq,
u32 num_descs);
/**
@@ -298,7 +350,7 @@ octeon_register_reqtype_free_fn(struct octeon_device *oct, int reqtype,
int
lio_process_iq_request_list(struct octeon_device *oct,
- struct octeon_instr_queue *iq);
+ struct octeon_instr_queue *iq, u32 napi_budget);
int octeon_send_command(struct octeon_device *oct, u32 iq_no,
u32 force_db, void *cmd, void *buf,
@@ -313,7 +365,10 @@ void octeon_prepare_soft_command(struct octeon_device *oct,
int octeon_send_soft_command(struct octeon_device *oct,
struct octeon_soft_command *sc);
-int octeon_setup_iq(struct octeon_device *oct, u32 iq_no,
- u32 num_descs, void *app_ctx);
-
+int octeon_setup_iq(struct octeon_device *oct, int ifidx,
+ int q_index, union oct_txpciq iq_no, u32 num_descs,
+ void *app_ctx);
+int
+octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
+ u32 pending_thresh, u32 napi_budget);
#endif /* __OCTEON_IQ_H__ */
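
Both command layouts in the new union are eight 64-bit words: dptr, ih2, irh, ossp[0..1], rdp, rptr plus a reserved word for cmd2, and dptr, ih3, pki_ih3, irh, ossp[0..1], rdp, rptr for cmd3, so OCT_64B_INSTR_SIZE stays 64 bytes either way. Build-time checks would pin that invariant down; a sketch in the kernel's own idiom:

	#include <linux/build_bug.h>

	/* Sketch: each layout is 8 x u64 and the union must not pad beyond
	 * 64 bytes, or OCT_64B_INSTR_SIZE silently changes. */
	static inline void octeon_instr_size_checks(void)
	{
		BUILD_BUG_ON(sizeof(struct octeon_instr2_64B) != 64);
		BUILD_BUG_ON(sizeof(struct octeon_instr3_64B) != 64);
		BUILD_BUG_ON(sizeof(union octeon_instr_64B) != 64);
	}
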
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_main.h b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
index cbd081981180..bc14e4c27332 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_main.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_main.h
@@ -126,22 +126,27 @@ static inline int octeon_map_pci_barx(struct octeon_device *oct,
}
static inline void *
-cnnic_alloc_aligned_dma(struct pci_dev *pci_dev,
- u32 size,
- u32 *alloc_size,
- size_t *orig_ptr,
- size_t *dma_addr __attribute__((unused)))
+cnnic_numa_alloc_aligned_dma(u32 size,
+ u32 *alloc_size,
+ size_t *orig_ptr,
+ int numa_node)
{
int retries = 0;
void *ptr = NULL;
#define OCTEON_MAX_ALLOC_RETRIES 1
do {
- ptr =
- (void *)__get_free_pages(GFP_KERNEL,
- get_order(size));
+ struct page *page = NULL;
+
+ page = alloc_pages_node(numa_node,
+ GFP_KERNEL,
+ get_order(size));
+ if (!page)
+ page = alloc_pages(GFP_KERNEL,
+ get_order(size));
+ ptr = (void *)page_address(page);
if ((unsigned long)ptr & 0x07) {
- free_pages((unsigned long)ptr, get_order(size));
+ __free_pages(page, get_order(size));
ptr = NULL;
/* Increment the size required if the first
* attempt failed.
@@ -169,7 +174,7 @@ sleep_cond(wait_queue_head_t *wait_queue, int *condition)
init_waitqueue_entry(&we, current);
add_wait_queue(wait_queue, &we);
- while (!(ACCESS_ONCE(*condition))) {
+ while (!(READ_ONCE(*condition))) {
set_current_state(TASK_INTERRUPTIBLE);
if (signal_pending(current))
goto out;
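
cnnic_numa_alloc_aligned_dma() now tries a node-local page first and falls back to any node, but as written it calls page_address() even when both attempts fail. A defensive sketch of the same pattern with that double-failure case guarded:

	#include <linux/gfp.h>
	#include <linux/mm.h>

	/* Sketch: node-preferred page allocation with an explicit guard for
	 * the case where both attempts fail. */
	static void *lio_alloc_pages_pref_node(int node, unsigned int order)
	{
		struct page *page = alloc_pages_node(node, GFP_KERNEL, order);

		if (!page)
			page = alloc_pages(GFP_KERNEL, order);	/* any node */
		if (!page)
			return NULL;				/* both failed */

		return page_address(page);
	}
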
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
index 5aecef870377..95a4bbedf557 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mem_ops.c
@@ -19,43 +19,29 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
-#include <linux/version.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
#include <linux/pci.h>
-#include <linux/kthread.h>
#include <linux/netdevice.h>
-#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
-#include "octeon_nic.h"
-#include "octeon_main.h"
-#include "octeon_network.h"
-#include "cn66xx_regs.h"
-#include "cn66xx_device.h"
-#include "cn68xx_regs.h"
-#include "cn68xx_device.h"
-#include "liquidio_image.h"
-#include "octeon_mem_ops.h"
#define MEMOPS_IDX MAX_BAR1_MAP_INDEX
+#ifdef __BIG_ENDIAN_BITFIELD
static inline void
-octeon_toggle_bar1_swapmode(struct octeon_device *oct __attribute__((unused)),
- u32 idx __attribute__((unused)))
+octeon_toggle_bar1_swapmode(struct octeon_device *oct, u32 idx)
{
-#ifdef __BIG_ENDIAN_BITFIELD
u32 mask;
mask = oct->fn_list.bar1_idx_read(oct, idx);
mask = (mask & 0x2) ? (mask & ~2) : (mask | 2);
oct->fn_list.bar1_idx_write(oct, idx, mask);
-#endif
}
+#else
+#define octeon_toggle_bar1_swapmode(oct, idx) (oct = oct)
+#endif
static void
octeon_pci_fastwrite(struct octeon_device *oct, u8 __iomem *mapped_addr,
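
On big-endian kernels the toggle flips bit 1 of the BAR1 index mask; the conditional in the hunk above is equivalent to a single XOR, shown here for clarity (a sketch, not a proposed change):

	/* Sketch: (mask & 0x2) ? (mask & ~2) : (mask | 2) is just mask ^ 0x2. */
	static inline void toggle_bar1_swapmode_xor(struct octeon_device *oct, u32 idx)
	{
		u32 mask = oct->fn_list.bar1_idx_read(oct, idx);

		oct->fn_list.bar1_idx_write(oct, idx, mask ^ 0x2);
	}
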
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_network.h b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
index b3abe5818fd3..fb820dc7fcb7 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_network.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_network.h
@@ -30,6 +30,20 @@
#include <linux/dma-mapping.h>
#include <linux/ptp_clock_kernel.h>
+#define LIO_MAX_MTU_SIZE (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE)
+#define LIO_MIN_MTU_SIZE 68
+
+struct oct_nic_stats_resp {
+ u64 rh;
+ struct oct_link_stats stats;
+ u64 status;
+};
+
+struct oct_nic_stats_ctrl {
+ struct completion complete;
+ struct net_device *netdev;
+};
+
/** LiquidIO per-interface network private data */
struct lio {
/** State of the interface. Rx/Tx happens only in the RUNNING state. */
@@ -48,11 +62,11 @@ struct lio {
*/
int rxq;
- /** Guards the glist */
- spinlock_t lock;
+ /** Guards each glist */
+ spinlock_t *glist_lock;
- /** Linked list of gather components */
- struct list_head glist;
+ /** Array of gather component linked lists */
+ struct list_head *glist;
/** Pointer to the NIC properties for the Octeon device this network
* interface is associated with.
@@ -67,6 +81,9 @@ struct lio {
/** Link information sent by the core application for this interface. */
struct oct_link_info linfo;
+ /** counter of link changes */
+ u64 link_changes;
+
/** Size of Tx queue for this octeon device. */
u32 tx_qsize;
@@ -82,6 +99,12 @@ struct lio {
/** Copy of Interface capabilities: TSO, TSO6, LRO, Checksums. */
u64 dev_capability;
+ /* Copy of transmit encapsulation capabilities:
+ * TSO, TSO6, Checksums for this device for Kernel
+ * 3.10.0 onwards
+ */
+ u64 enc_dev_capability;
+
/** Copy of beacon reg in phy */
u32 phy_beacon_val;
@@ -101,7 +124,6 @@ struct lio {
/* work queue for txq status */
struct cavium_wq txq_status_wq;
-
};
#define LIO_SIZE (sizeof(struct lio))
@@ -111,8 +133,9 @@ struct lio {
* \brief Enable or disable feature
* @param netdev pointer to network device
* @param cmd Command that just requires acknowledgment
+ * @param param1 Parameter to command
*/
-int liquidio_set_feature(struct net_device *netdev, int cmd);
+int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1);
/**
* \brief Link control command completion callback
@@ -131,14 +154,30 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr);
*/
void liquidio_set_ethtool_ops(struct net_device *netdev);
-static inline void
-*recv_buffer_alloc(struct octeon_device *oct __attribute__((unused)),
- u32 q_no __attribute__((unused)), u32 size)
-{
#define SKB_ADJ_MASK 0x3F
#define SKB_ADJ (SKB_ADJ_MASK + 1)
- struct sk_buff *skb = dev_alloc_skb(size + SKB_ADJ);
+#define MIN_SKB_SIZE 256 /* 8 bytes and more - 8 bytes for PTP */
+#define LIO_RXBUFFER_SZ 2048
+
+static inline void
+*recv_buffer_alloc(struct octeon_device *oct,
+ struct octeon_skb_page_info *pg_info)
+{
+ struct page *page;
+ struct sk_buff *skb;
+ struct octeon_skb_page_info *skb_pg_info;
+
+ page = alloc_page(GFP_ATOMIC | __GFP_COLD);
+ if (unlikely(!page))
+ return NULL;
+
+ skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
+ if (unlikely(!skb)) {
+ __free_page(page);
+ pg_info->page = NULL;
+ return NULL;
+ }
if ((unsigned long)skb->data & SKB_ADJ_MASK) {
u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);
@@ -146,11 +185,151 @@ static inline void
skb_reserve(skb, r);
}
+ skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+ /* Get DMA info */
+ pg_info->dma = dma_map_page(&oct->pci_dev->dev, page, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+
+ /* Mapping failed!! */
+ if (dma_mapping_error(&oct->pci_dev->dev, pg_info->dma)) {
+ __free_page(page);
+ dev_kfree_skb_any((struct sk_buff *)skb);
+ pg_info->page = NULL;
+ return NULL;
+ }
+
+ pg_info->page = page;
+ pg_info->page_offset = 0;
+ skb_pg_info->page = page;
+ skb_pg_info->page_offset = 0;
+ skb_pg_info->dma = pg_info->dma;
+
return (void *)skb;
}
+static inline void
+*recv_buffer_fast_alloc(u32 size)
+{
+ struct sk_buff *skb;
+ struct octeon_skb_page_info *skb_pg_info;
+
+ skb = dev_alloc_skb(size + SKB_ADJ);
+ if (unlikely(!skb))
+ return NULL;
+
+ if ((unsigned long)skb->data & SKB_ADJ_MASK) {
+ u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);
+
+ skb_reserve(skb, r);
+ }
+
+ skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+ skb_pg_info->page = NULL;
+ skb_pg_info->page_offset = 0;
+ skb_pg_info->dma = 0;
+
+ return skb;
+}
+
+static inline int
+recv_buffer_recycle(struct octeon_device *oct, void *buf)
+{
+ struct octeon_skb_page_info *pg_info = buf;
+
+ if (!pg_info->page) {
+ dev_err(&oct->pci_dev->dev, "%s: pg_info->page NULL\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ if (unlikely(page_count(pg_info->page) != 1) ||
+ unlikely(page_to_nid(pg_info->page) != numa_node_id())) {
+ dma_unmap_page(&oct->pci_dev->dev,
+ pg_info->dma, (PAGE_SIZE << 0),
+ DMA_FROM_DEVICE);
+ pg_info->dma = 0;
+ pg_info->page = NULL;
+ pg_info->page_offset = 0;
+ return -ENOMEM;
+ }
+
+ /* Flip to other half of the buffer */
+ if (pg_info->page_offset == 0)
+ pg_info->page_offset = LIO_RXBUFFER_SZ;
+ else
+ pg_info->page_offset = 0;
+ page_ref_inc(pg_info->page);
+
+ return 0;
+}
+
+static inline void
+*recv_buffer_reuse(struct octeon_device *oct, void *buf)
+{
+ struct octeon_skb_page_info *pg_info = buf, *skb_pg_info;
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ);
+ if (unlikely(!skb)) {
+ dma_unmap_page(&oct->pci_dev->dev,
+ pg_info->dma, (PAGE_SIZE << 0),
+ DMA_FROM_DEVICE);
+ return NULL;
+ }
+
+ if ((unsigned long)skb->data & SKB_ADJ_MASK) {
+ u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK);
+
+ skb_reserve(skb, r);
+ }
+
+ skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+ skb_pg_info->page = pg_info->page;
+ skb_pg_info->page_offset = pg_info->page_offset;
+ skb_pg_info->dma = pg_info->dma;
+
+ return skb;
+}
+
+static inline void
+recv_buffer_destroy(void *buffer, struct octeon_skb_page_info *pg_info)
+{
+ struct sk_buff *skb = (struct sk_buff *)buffer;
+
+ put_page(pg_info->page);
+ pg_info->dma = 0;
+ pg_info->page = NULL;
+ pg_info->page_offset = 0;
+
+ if (skb)
+ dev_kfree_skb_any(skb);
+}
+
static inline void recv_buffer_free(void *buffer)
{
+ struct sk_buff *skb = (struct sk_buff *)buffer;
+ struct octeon_skb_page_info *pg_info;
+
+ pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+
+ if (pg_info->page) {
+ put_page(pg_info->page);
+ pg_info->dma = 0;
+ pg_info->page = NULL;
+ pg_info->page_offset = 0;
+ }
+
+ dev_kfree_skb_any((struct sk_buff *)buffer);
+}
+
+static inline void
+recv_buffer_fast_free(void *buffer)
+{
+ dev_kfree_skb_any((struct sk_buff *)buffer);
+}
+
+static inline void tx_buffer_free(void *buffer)
+{
dev_kfree_skb_any((struct sk_buff *)buffer);
}
@@ -159,7 +338,17 @@ static inline void recv_buffer_free(void *buffer)
#define lio_dma_free(oct, size, virt_addr, dma_addr) \
dma_free_coherent(&oct->pci_dev->dev, size, virt_addr, dma_addr)
-#define get_rbd(ptr) (((struct sk_buff *)(ptr))->data)
+static inline
+void *get_rbd(struct sk_buff *skb)
+{
+ struct octeon_skb_page_info *pg_info;
+ unsigned char *va;
+
+ pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+ va = page_address(pg_info->page) + pg_info->page_offset;
+
+ return va;
+}
static inline u64
lio_map_ring_info(struct octeon_droq *droq, u32 i)
@@ -170,7 +359,7 @@ lio_map_ring_info(struct octeon_droq *droq, u32 i)
dma_addr = dma_map_single(&oct->pci_dev->dev, &droq->info_list[i],
OCT_DROQ_INFO_SIZE, DMA_FROM_DEVICE);
- BUG_ON(dma_mapping_error(&oct->pci_dev->dev, dma_addr));
+ WARN_ON(dma_mapping_error(&oct->pci_dev->dev, dma_addr));
return (u64)dma_addr;
}
@@ -183,33 +372,44 @@ lio_unmap_ring_info(struct pci_dev *pci_dev,
}
static inline u64
-lio_map_ring(struct pci_dev *pci_dev,
- void *buf, u32 size)
+lio_map_ring(void *buf)
{
dma_addr_t dma_addr;
- dma_addr = dma_map_single(&pci_dev->dev, get_rbd(buf), size,
- DMA_FROM_DEVICE);
+ struct sk_buff *skb = (struct sk_buff *)buf;
+ struct octeon_skb_page_info *pg_info;
- BUG_ON(dma_mapping_error(&pci_dev->dev, dma_addr));
+ pg_info = ((struct octeon_skb_page_info *)(skb->cb));
+ if (!pg_info->page) {
+ pr_err("%s: pg_info->page NULL\n", __func__);
+ WARN_ON(1);
+ }
+
+ /* Get DMA info */
+ dma_addr = pg_info->dma;
+ if (!pg_info->dma) {
+ pr_err("%s: ERROR it should be already available\n",
+ __func__);
+ WARN_ON(1);
+ }
+ dma_addr += pg_info->page_offset;
return (u64)dma_addr;
}
static inline void
lio_unmap_ring(struct pci_dev *pci_dev,
- u64 buf_ptr, u32 size)
+ u64 buf_ptr)
+
{
- dma_unmap_single(&pci_dev->dev,
- buf_ptr, size,
- DMA_FROM_DEVICE);
+ dma_unmap_page(&pci_dev->dev,
+ buf_ptr, (PAGE_SIZE << 0),
+ DMA_FROM_DEVICE);
}
-static inline void *octeon_fast_packet_alloc(struct octeon_device *oct,
- struct octeon_droq *droq,
- u32 q_no, u32 size)
+static inline void *octeon_fast_packet_alloc(u32 size)
{
- return recv_buffer_alloc(oct, q_no, size);
+ return recv_buffer_fast_alloc(size);
}
static inline void octeon_fast_packet_next(struct octeon_droq *droq,
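
recv_buffer_recycle() reuses a page only when two conditions hold: the driver owns the sole reference and the page is local to the current NUMA node. Each reuse then ping-pongs between the page's two LIO_RXBUFFER_SZ halves and takes an extra page reference. The policy restated as a predicate (sketch):

	#include <linux/types.h>
	#include <linux/mm.h>
	#include <linux/topology.h>

	/* Sketch: a page may be recycled only if nobody else holds it and it
	 * sits on the node the consumer is currently running on. */
	static inline bool lio_rx_page_reusable(struct page *page)
	{
		return page_count(page) == 1 &&
		       page_to_nid(page) == numa_node_id();
	}
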
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
index 1a0191549cb3..166727be928f 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
@@ -19,14 +19,9 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
-#include <linux/version.h>
-#include <linux/types.h>
-#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
-#include <linux/kthread.h>
#include <linux/netdevice.h>
-#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
@@ -34,21 +29,14 @@
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
-#include "octeon_network.h"
-#include "cn66xx_regs.h"
-#include "cn66xx_device.h"
-#include "cn68xx_regs.h"
-#include "cn68xx_device.h"
-#include "liquidio_image.h"
-#include "octeon_mem_ops.h"
void *
octeon_alloc_soft_command_resp(struct octeon_device *oct,
- struct octeon_instr_64B *cmd,
- size_t rdatasize)
+ union octeon_instr_64B *cmd,
+ u32 rdatasize)
{
struct octeon_soft_command *sc;
- struct octeon_instr_ih *ih;
+ struct octeon_instr_ih2 *ih2;
struct octeon_instr_irh *irh;
struct octeon_instr_rdp *rdp;
@@ -59,24 +47,25 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct,
return NULL;
/* Copy existing command structure into the soft command */
- memcpy(&sc->cmd, cmd, sizeof(struct octeon_instr_64B));
+ memcpy(&sc->cmd, cmd, sizeof(union octeon_instr_64B));
/* Add in the response related fields. Opcode and Param are already
* there.
*/
- ih = (struct octeon_instr_ih *)&sc->cmd.ih;
- ih->fsz = 40; /* irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */
+ ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
+ ih2->fsz = 40; /* irh + ossp[0] + ossp[1] + rdp + rptr = 40 bytes */
- irh = (struct octeon_instr_irh *)&sc->cmd.irh;
irh->rflag = 1; /* a response is required */
- irh->len = 4; /* means four 64-bit words immediately follow irh */
- rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
rdp->pcie_port = oct->pcie_port;
rdp->rlen = rdatasize;
*sc->status_word = COMPLETION_WORD_INIT;
+ sc->cmd.cmd2.rptr = sc->dmarptr;
+
sc->wait_time = 1000;
sc->timeout = jiffies + sc->wait_time;
@@ -119,12 +108,11 @@ static void octnet_link_ctrl_callback(struct octeon_device *oct,
static inline struct octeon_soft_command
*octnic_alloc_ctrl_pkt_sc(struct octeon_device *oct,
- struct octnic_ctrl_pkt *nctrl,
- struct octnic_ctrl_params nparams)
+ struct octnic_ctrl_pkt *nctrl)
{
struct octeon_soft_command *sc = NULL;
u8 *data;
- size_t rdatasize;
+ u32 rdatasize;
u32 uddsize = 0, datasize = 0;
uddsize = (u32)(nctrl->ncmd.s.more * 8);
@@ -143,7 +131,7 @@ static inline struct octeon_soft_command
data = (u8 *)sc->virtdptr;
- memcpy(data, &nctrl->ncmd, OCTNET_CMD_SIZE);
+ memcpy(data, &nctrl->ncmd, OCTNET_CMD_SIZE);
octeon_swap_8B_data((u64 *)data, (OCTNET_CMD_SIZE >> 3));
@@ -152,6 +140,8 @@ static inline struct octeon_soft_command
memcpy(data + OCTNET_CMD_SIZE, nctrl->udd, uddsize);
}
+ sc->iq_no = (u32)nctrl->iq_no;
+
octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_CMD,
0, 0, 0);
@@ -164,26 +154,41 @@ static inline struct octeon_soft_command
int
octnet_send_nic_ctrl_pkt(struct octeon_device *oct,
- struct octnic_ctrl_pkt *nctrl,
- struct octnic_ctrl_params nparams)
+ struct octnic_ctrl_pkt *nctrl)
{
int retval;
struct octeon_soft_command *sc = NULL;
- sc = octnic_alloc_ctrl_pkt_sc(oct, nctrl, nparams);
+ spin_lock_bh(&oct->cmd_resp_wqlock);
+ /* Allow only rx ctrl command to stop traffic on the chip
+ * during offline operations
+ */
+ if ((oct->cmd_resp_state == OCT_DRV_OFFLINE) &&
+ (nctrl->ncmd.s.cmd != OCTNET_CMD_RX_CTL)) {
+ spin_unlock_bh(&oct->cmd_resp_wqlock);
+ dev_err(&oct->pci_dev->dev,
+ "%s cmd:%d not processed since driver offline\n",
+ __func__, nctrl->ncmd.s.cmd);
+ return -1;
+ }
+
+ sc = octnic_alloc_ctrl_pkt_sc(oct, nctrl);
if (!sc) {
dev_err(&oct->pci_dev->dev, "%s soft command alloc failed\n",
__func__);
+ spin_unlock_bh(&oct->cmd_resp_wqlock);
return -1;
}
retval = octeon_send_soft_command(oct, sc);
- if (retval) {
+ if (retval == IQ_SEND_FAILED) {
octeon_free_soft_command(oct, sc);
- dev_err(&oct->pci_dev->dev, "%s soft command send failed status: %x\n",
- __func__, retval);
+ dev_err(&oct->pci_dev->dev, "%s soft command:%d send failed status: %x\n",
+ __func__, nctrl->ncmd.s.cmd, retval);
+ spin_unlock_bh(&oct->cmd_resp_wqlock);
return -1;
}
+ spin_unlock_bh(&oct->cmd_resp_wqlock);
return retval;
}
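
With octnic_ctrl_params gone, callers fill octnic_ctrl_pkt directly and pick the input queue through the new iq_no field. A hedged sketch of issuing an rx-control command after this change; the param1 field and its 1-means-enable encoding are assumptions drawn from how the NIC module uses these commands, not from this hunk:

	/* Sketch: start rx on the card through the control-packet path. */
	static int lio_start_rx(struct octeon_device *oct, u32 txq)
	{
		struct octnic_ctrl_pkt nctrl;

		memset(&nctrl, 0, sizeof(nctrl));
		nctrl.ncmd.u64 = 0;
		nctrl.ncmd.s.cmd = OCTNET_CMD_RX_CTL;
		nctrl.ncmd.s.param1 = 1;	/* assumed: 1 = enable rx */
		nctrl.iq_no = txq;		/* new field: queue chosen by caller */
		nctrl.wait_time = 100;

		return octnet_send_nic_ctrl_pkt(oct, &nctrl);
	}
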
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
index 0238857c8105..b71a2bbe4bee 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.h
@@ -52,6 +52,9 @@ struct octnic_ctrl_pkt {
/** Additional data that may be needed by some commands. */
u64 udd[MAX_NCTRL_UDD];
+ /** Input queue to use to send this command. */
+ u64 iq_no;
+
/** Time to wait for Octeon software to respond to this control command.
* If wait_time is 0, OSI assumes no response is expected.
*/
@@ -82,7 +85,7 @@ struct octnic_data_pkt {
u32 datasize;
/** Command to be passed to the Octeon device software. */
- struct octeon_instr_64B cmd;
+ union octeon_instr_64B cmd;
/** Input queue to use to send this command. */
u32 q_no;
@@ -94,15 +97,14 @@ struct octnic_data_pkt {
*/
union octnic_cmd_setup {
struct {
- u32 ifidx:8;
- u32 cksum_offset:7;
+ u32 iq_no:8;
u32 gather:1;
u32 timestamp:1;
- u32 ipv4opts_ipv6exthdr:2;
u32 ip_csum:1;
+ u32 transport_csum:1;
u32 tnl_csum:1;
+ u32 rsvd:19;
- u32 rsvd:11;
union {
u32 datasize;
u32 gatherptrs;
@@ -113,79 +115,146 @@ union octnic_cmd_setup {
};
-struct octnic_ctrl_params {
- u32 resp_order;
-};
-
static inline int octnet_iq_is_full(struct octeon_device *oct, u32 q_no)
{
return ((u32)atomic_read(&oct->instr_queue[q_no]->instr_pending)
>= (oct->instr_queue[q_no]->max_count - 2));
}
-/** Utility function to prepare a 64B NIC instruction based on a setup command
- * @param cmd - pointer to instruction to be filled in.
- * @param setup - pointer to the setup structure
- * @param q_no - which queue for back pressure
- *
- * Assumes the cmd instruction is pre-allocated, but no fields are filled in.
- */
static inline void
-octnet_prepare_pci_cmd(struct octeon_instr_64B *cmd,
- union octnic_cmd_setup *setup, u32 tag)
+octnet_prepare_pci_cmd_o2(struct octeon_device *oct,
+ union octeon_instr_64B *cmd,
+ union octnic_cmd_setup *setup, u32 tag)
{
- struct octeon_instr_ih *ih;
+ struct octeon_instr_ih2 *ih2;
struct octeon_instr_irh *irh;
union octnic_packet_params packet_params;
+ int port;
- memset(cmd, 0, sizeof(struct octeon_instr_64B));
+ memset(cmd, 0, sizeof(union octeon_instr_64B));
- ih = (struct octeon_instr_ih *)&cmd->ih;
+ ih2 = (struct octeon_instr_ih2 *)&cmd->cmd2.ih2;
/* assume that rflag is cleared so therefore front data will only have
- * irh and ossp[1] and ossp[2] for a total of 24 bytes
+ * irh and ossp[0], ossp[1] for a total of 24 bytes
*/
- ih->fsz = 24;
+ ih2->fsz = 24;
+
+ ih2->tagtype = ORDERED_TAG;
+ ih2->grp = DEFAULT_POW_GRP;
- ih->tagtype = ORDERED_TAG;
- ih->grp = DEFAULT_POW_GRP;
+ port = (int)oct->instr_queue[setup->s.iq_no]->txpciq.s.port;
if (tag)
- ih->tag = tag;
+ ih2->tag = tag;
else
- ih->tag = LIO_DATA(setup->s.ifidx);
+ ih2->tag = LIO_DATA(port);
- ih->raw = 1;
- ih->qos = (setup->s.ifidx & 3) + 4; /* map qos based on interface */
+ ih2->raw = 1;
+ ih2->qos = (port & 3) + 4; /* map qos based on interface */
if (!setup->s.gather) {
- ih->dlengsz = setup->s.u.datasize;
+ ih2->dlengsz = setup->s.u.datasize;
} else {
- ih->gather = 1;
- ih->dlengsz = setup->s.u.gatherptrs;
+ ih2->gather = 1;
+ ih2->dlengsz = setup->s.u.gatherptrs;
}
- irh = (struct octeon_instr_irh *)&cmd->irh;
+ irh = (struct octeon_instr_irh *)&cmd->cmd2.irh;
irh->opcode = OPCODE_NIC;
irh->subcode = OPCODE_NIC_NW_DATA;
packet_params.u32 = 0;
- if (setup->s.cksum_offset) {
- packet_params.s.csoffset = setup->s.cksum_offset;
- packet_params.s.ipv4opts_ipv6exthdr =
- setup->s.ipv4opts_ipv6exthdr;
+ packet_params.s.ip_csum = setup->s.ip_csum;
+ packet_params.s.transport_csum = setup->s.transport_csum;
+ packet_params.s.tnl_csum = setup->s.tnl_csum;
+ packet_params.s.tsflag = setup->s.timestamp;
+
+ irh->ossp = packet_params.u32;
+}
+
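
octnet_prepare_pci_cmd_o2() above (and the _o3 variant below) packs the per-packet checksum and timestamp flags through union octnic_packet_params and stores the whole set into the 32-bit irh->ossp slot in one assignment. A sketch of that bitfield-union idiom, with illustrative field widths:

    #include <linux/types.h>

    union pkt_params {
        u32 u32_val;
        struct {
            u32 ip_csum:1;
            u32 transport_csum:1;
            u32 tnl_csum:1;
            u32 tsflag:1;
            u32 rsvd:28;
        } s;
    };

    static u32 pack_pkt_params(bool ip, bool l4, bool tnl, bool ts)
    {
        union pkt_params p = { .u32_val = 0 };

        p.s.ip_csum = ip;
        p.s.transport_csum = l4;
        p.s.tnl_csum = tnl;
        p.s.tsflag = ts;
        return p.u32_val;    /* written verbatim into irh->ossp */
    }
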
+static inline void
+octnet_prepare_pci_cmd_o3(struct octeon_device *oct,
+ union octeon_instr_64B *cmd,
+ union octnic_cmd_setup *setup, u32 tag)
+{
+ struct octeon_instr_irh *irh;
+ struct octeon_instr_ih3 *ih3;
+ struct octeon_instr_pki_ih3 *pki_ih3;
+ union octnic_packet_params packet_params;
+ int port;
+
+ memset(cmd, 0, sizeof(union octeon_instr_64B));
+
+ ih3 = (struct octeon_instr_ih3 *)&cmd->cmd3.ih3;
+ pki_ih3 = (struct octeon_instr_pki_ih3 *)&cmd->cmd3.pki_ih3;
+
+ /* assume that rflag is cleared so therefore front data will only have
+ * irh and ossp[0], ossp[1] for a total of 24 bytes
+ */
+ ih3->pkind = oct->instr_queue[setup->s.iq_no]->txpciq.s.pkind;
+ /* PKI IH */
+ ih3->fsz = 24 + 8;
+
+ if (!setup->s.gather) {
+ ih3->dlengsz = setup->s.u.datasize;
+ } else {
+ ih3->gather = 1;
+ ih3->dlengsz = setup->s.u.gatherptrs;
}
+ pki_ih3->w = 1;
+ pki_ih3->raw = 1;
+ pki_ih3->utag = 1;
+ pki_ih3->utt = 1;
+ pki_ih3->uqpg = oct->instr_queue[setup->s.iq_no]->txpciq.s.use_qpg;
+
+ port = (int)oct->instr_queue[setup->s.iq_no]->txpciq.s.port;
+
+ if (tag)
+ pki_ih3->tag = tag;
+ else
+ pki_ih3->tag = LIO_DATA(port);
+
+ pki_ih3->tagtype = ORDERED_TAG;
+ pki_ih3->qpg = oct->instr_queue[setup->s.iq_no]->txpciq.s.qpg;
+ pki_ih3->pm = 0x7; /* 0x7 - parse nothing, leave packet uninterpreted */
+ pki_ih3->sl = 8; /* sl will be sizeof(pki_ih3) */
+
+ irh = (struct octeon_instr_irh *)&cmd->cmd3.irh;
+
+ irh->opcode = OPCODE_NIC;
+ irh->subcode = OPCODE_NIC_NW_DATA;
+
+ packet_params.u32 = 0;
+
packet_params.s.ip_csum = setup->s.ip_csum;
+ packet_params.s.transport_csum = setup->s.transport_csum;
packet_params.s.tnl_csum = setup->s.tnl_csum;
- packet_params.s.ifidx = setup->s.ifidx;
packet_params.s.tsflag = setup->s.timestamp;
irh->ossp = packet_params.u32;
}
+/** Utility function to prepare a 64B NIC instruction based on a setup command
+ * @param oct - pointer to the octeon device
+ * @param cmd - pointer to instruction to be filled in.
+ * @param setup - pointer to the setup structure
+ * @param tag - tag for the instruction; 0 selects the port-based default
+ *
+ * Assumes the cmd instruction is pre-allocated, but no fields are filled in.
+ */
+static inline void
+octnet_prepare_pci_cmd(struct octeon_device *oct, union octeon_instr_64B *cmd,
+ union octnic_cmd_setup *setup, u32 tag)
+{
+ if (OCTEON_CN6XXX(oct))
+ octnet_prepare_pci_cmd_o2(oct, cmd, setup, tag);
+ else
+ octnet_prepare_pci_cmd_o3(oct, cmd, setup, tag);
+}
+
/** Allocate a soft command with space for a response immediately following
* the command.
* @param oct - octeon device pointer
@@ -198,8 +267,8 @@ octnet_prepare_pci_cmd(struct octeon_instr_64B *cmd,
*/
void *
octeon_alloc_soft_command_resp(struct octeon_device *oct,
- struct octeon_instr_64B *cmd,
- size_t rdatasize);
+ union octeon_instr_64B *cmd,
+ u32 rdatasize);
/** Send a NIC data packet to the device
* @param oct - octeon device pointer
@@ -214,14 +283,11 @@ int octnet_send_nic_data_pkt(struct octeon_device *oct,
/** Send a NIC control packet to the device
* @param oct - octeon device pointer
* @param nctrl - control structure with command, timeout, and callback info
- * @param nparams - response control structure
- *
* @returns IQ_FAILED if it failed to add to the input queue, IQ_STOP if the
* queue should be stopped, and IQ_SEND_OK if it sent okay.
*/
int
octnet_send_nic_ctrl_pkt(struct octeon_device *oct,
- struct octnic_ctrl_pkt *nctrl,
- struct octnic_ctrl_params nparams);
+ struct octnic_ctrl_pkt *nctrl);
#endif
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index a2a24652c8f3..d32492f185ff 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -19,28 +19,17 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
-#include <linux/version.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
#include <linux/pci.h>
-#include <linux/kthread.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
-#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
-#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
-#include "cn66xx_regs.h"
#include "cn66xx_device.h"
-#include "cn68xx_regs.h"
-#include "cn68xx_device.h"
-#include "liquidio_image.h"
#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count) \
(octeon_dev_ptr->instr_queue[iq_no]->stats.field += count)
@@ -51,7 +40,7 @@ struct iq_post_status {
};
static void check_db_timeout(struct work_struct *work);
-static void __check_db_timeout(struct octeon_device *oct, unsigned long iq_no);
+static void __check_db_timeout(struct octeon_device *oct, u64 iq_no);
static void (*reqtype_free_fn[MAX_OCTEON_DEVICES][REQTYPE_LAST + 1]) (void *);
@@ -69,12 +58,16 @@ static inline int IQ_INSTR_MODE_64B(struct octeon_device *oct, int iq_no)
/* Return 0 on success, 1 on failure */
int octeon_init_instr_queue(struct octeon_device *oct,
- u32 iq_no, u32 num_descs)
+ union oct_txpciq txpciq,
+ u32 num_descs)
{
struct octeon_instr_queue *iq;
struct octeon_iq_config *conf = NULL;
+ u32 iq_no = (u32)txpciq.s.q_no;
u32 q_size;
struct cavium_wq *db_wq;
+ int orig_node = dev_to_node(&oct->pci_dev->dev);
+ int numa_node = cpu_to_node(iq_no % num_online_cpus());
if (OCTEON_CN6XXX(oct))
conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn6xxx, conf)));
@@ -95,9 +88,15 @@ int octeon_init_instr_queue(struct octeon_device *oct,
q_size = (u32)conf->instr_type * num_descs;
iq = oct->instr_queue[iq_no];
+ iq->oct_dev = oct;
+ set_dev_node(&oct->pci_dev->dev, numa_node);
iq->base_addr = lio_dma_alloc(oct, q_size,
(dma_addr_t *)&iq->base_addr_dma);
+ set_dev_node(&oct->pci_dev->dev, orig_node);
+ if (!iq->base_addr)
+ iq->base_addr = lio_dma_alloc(oct, q_size,
+ (dma_addr_t *)&iq->base_addr_dma);
if (!iq->base_addr) {
dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n",
iq_no);
@@ -109,7 +108,11 @@ int octeon_init_instr_queue(struct octeon_device *oct,
/* Initialize a list to hold requests that have been posted to Octeon
* but have yet to be fetched by Octeon
*/
- iq->request_list = vmalloc(sizeof(*iq->request_list) * num_descs);
+ iq->request_list = vmalloc_node((sizeof(*iq->request_list) * num_descs),
+ numa_node);
+ if (!iq->request_list)
+ iq->request_list = vmalloc(sizeof(*iq->request_list) *
+ num_descs);
if (!iq->request_list) {
lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n",
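
Both allocations in this function now try the NUMA node derived from the queue number first and only fall back to an unconstrained allocation if that node is exhausted, so a node without free memory degrades gracefully instead of failing queue setup. The request-list half of that pattern, reduced to its essentials:

    #include <linux/vmalloc.h>

    static void *alloc_node_first(size_t nbytes, int numa_node)
    {
        void *p = vmalloc_node(nbytes, numa_node);

        if (!p)                 /* node exhausted: take any node */
            p = vmalloc(nbytes);
        return p;               /* caller checks for NULL and vfree()s */
    }
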
@@ -122,7 +125,7 @@ int octeon_init_instr_queue(struct octeon_device *oct,
dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %llx count: %d\n",
iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count);
- iq->iq_no = iq_no;
+ iq->txpciq.u64 = txpciq.u64;
iq->fill_threshold = (u32)conf->db_min;
iq->fill_cnt = 0;
iq->host_write_index = 0;
@@ -135,8 +138,11 @@ int octeon_init_instr_queue(struct octeon_device *oct,
/* Initialize the spinlock for this instruction queue */
spin_lock_init(&iq->lock);
+ spin_lock_init(&iq->post_lock);
- oct->io_qmask.iq |= (1 << iq_no);
+ spin_lock_init(&iq->iq_flush_running_lock);
+
+ oct->io_qmask.iq |= (1ULL << iq_no);
/* Set the 32B/64B mode for each input queue */
oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
@@ -144,7 +150,9 @@ int octeon_init_instr_queue(struct octeon_device *oct,
oct->fn_list.setup_iq_regs(oct, iq_no);
- oct->check_db_wq[iq_no].wq = create_workqueue("check_iq_db");
+ oct->check_db_wq[iq_no].wq = alloc_workqueue("check_iq_db",
+ WQ_MEM_RECLAIM,
+ 0);
if (!oct->check_db_wq[iq_no].wq) {
lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n",
@@ -168,7 +176,6 @@ int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
cancel_delayed_work_sync(&oct->check_db_wq[iq_no].wk.work);
- flush_workqueue(oct->check_db_wq[iq_no].wq);
destroy_workqueue(oct->check_db_wq[iq_no].wq);
if (OCTEON_CN6XXX(oct))
@@ -188,26 +195,38 @@ int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
/* Return 0 on success, 1 on failure */
int octeon_setup_iq(struct octeon_device *oct,
- u32 iq_no,
+ int ifidx,
+ int q_index,
+ union oct_txpciq txpciq,
u32 num_descs,
void *app_ctx)
{
+ u32 iq_no = (u32)txpciq.s.q_no;
+ int numa_node = cpu_to_node(iq_no % num_online_cpus());
+
if (oct->instr_queue[iq_no]) {
dev_dbg(&oct->pci_dev->dev, "IQ is in use. Cannot create the IQ: %d again\n",
iq_no);
+ oct->instr_queue[iq_no]->txpciq.u64 = txpciq.u64;
oct->instr_queue[iq_no]->app_ctx = app_ctx;
return 0;
}
oct->instr_queue[iq_no] =
- vmalloc(sizeof(struct octeon_instr_queue));
+ vmalloc_node(sizeof(struct octeon_instr_queue), numa_node);
+ if (!oct->instr_queue[iq_no])
+ oct->instr_queue[iq_no] =
+ vmalloc(sizeof(struct octeon_instr_queue));
if (!oct->instr_queue[iq_no])
return 1;
memset(oct->instr_queue[iq_no], 0,
sizeof(struct octeon_instr_queue));
+ oct->instr_queue[iq_no]->q_index = q_index;
oct->instr_queue[iq_no]->app_ctx = app_ctx;
- if (octeon_init_instr_queue(oct, iq_no, num_descs)) {
+ oct->instr_queue[iq_no]->ifidx = ifidx;
+
+ if (octeon_init_instr_queue(oct, txpciq, num_descs)) {
vfree(oct->instr_queue[iq_no]);
oct->instr_queue[iq_no] = NULL;
return 1;
@@ -226,8 +245,8 @@ int lio_wait_for_instr_fetch(struct octeon_device *oct)
instr_cnt = 0;
/*for (i = 0; i < oct->num_iqs; i++) {*/
- for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) {
- if (!(oct->io_qmask.iq & (1UL << i)))
+ for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
+ if (!(oct->io_qmask.iq & (1ULL << i)))
continue;
pending =
atomic_read(&oct->
@@ -271,40 +290,8 @@ static inline void __copy_cmd_into_iq(struct octeon_instr_queue *iq,
memcpy(iqptr, cmd, cmdsize);
}
-static inline int
-__post_command(struct octeon_device *octeon_dev __attribute__((unused)),
- struct octeon_instr_queue *iq,
- u32 force_db __attribute__((unused)), u8 *cmd)
-{
- u32 index = -1;
-
- /* This ensures that the read index does not wrap around to the same
- * position if queue gets full before Octeon could fetch any instr.
- */
- if (atomic_read(&iq->instr_pending) >= (s32)(iq->max_count - 1))
- return -1;
-
- __copy_cmd_into_iq(iq, cmd);
-
- /* "index" is returned, host_write_index is modified. */
- index = iq->host_write_index;
- INCR_INDEX_BY1(iq->host_write_index, iq->max_count);
- iq->fill_cnt++;
-
- /* Flush the command into memory. We need to be sure the data is in
- * memory before indicating that the instruction is pending.
- */
- wmb();
-
- atomic_inc(&iq->instr_pending);
-
- return index;
-}
-
static inline struct iq_post_status
-__post_command2(struct octeon_device *octeon_dev __attribute__((unused)),
- struct octeon_instr_queue *iq,
- u32 force_db __attribute__((unused)), u8 *cmd)
+__post_command2(struct octeon_instr_queue *iq, u8 *cmd)
{
struct iq_post_status st;
@@ -362,17 +349,19 @@ __add_to_request_list(struct octeon_instr_queue *iq,
iq->request_list[idx].reqtype = reqtype;
}
+/* Can only run in process context */
int
lio_process_iq_request_list(struct octeon_device *oct,
- struct octeon_instr_queue *iq)
+ struct octeon_instr_queue *iq, u32 napi_budget)
{
int reqtype;
void *buf;
u32 old = iq->flush_index;
u32 inst_count = 0;
- unsigned pkts_compl = 0, bytes_compl = 0;
+ unsigned int pkts_compl = 0, bytes_compl = 0;
struct octeon_soft_command *sc;
struct octeon_instr_irh *irh;
+ unsigned long flags;
while (old != iq->octeon_read_index) {
reqtype = iq->request_list[old].reqtype;
@@ -394,7 +383,7 @@ lio_process_iq_request_list(struct octeon_device *oct,
case REQTYPE_SOFT_COMMAND:
sc = buf;
- irh = (struct octeon_instr_irh *)&sc->cmd.irh;
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
if (irh->rflag) {
/* We're expecting a response from Octeon.
* It's up to lio_process_ordered_list() to
@@ -402,17 +391,22 @@ lio_process_iq_request_list(struct octeon_device *oct,
* command response list because we expect
* a response from Octeon.
*/
- spin_lock_bh(&oct->response_list
- [OCTEON_ORDERED_SC_LIST].lock);
+ spin_lock_irqsave
+ (&oct->response_list
+ [OCTEON_ORDERED_SC_LIST].lock,
+ flags);
atomic_inc(&oct->response_list
[OCTEON_ORDERED_SC_LIST].
pending_req_count);
list_add_tail(&sc->node, &oct->response_list
[OCTEON_ORDERED_SC_LIST].head);
- spin_unlock_bh(&oct->response_list
- [OCTEON_ORDERED_SC_LIST].lock);
+ spin_unlock_irqrestore
+ (&oct->response_list
+ [OCTEON_ORDERED_SC_LIST].lock,
+ flags);
} else {
if (sc->callback) {
+ /* This callback must not sleep */
sc->callback(oct, OCTEON_REQUEST_DONE,
sc->callback_arg);
}
@@ -430,6 +424,9 @@ lio_process_iq_request_list(struct octeon_device *oct,
skip_this:
inst_count++;
INCR_INDEX_BY1(old, iq->max_count);
+
+ if (napi_budget && (inst_count >= napi_budget))
+ break;
}
if (bytes_compl)
octeon_report_tx_completion_to_bql(iq->app_ctx, pkts_compl,
@@ -439,38 +436,63 @@ lio_process_iq_request_list(struct octeon_device *oct,
return inst_count;
}
-static inline void
-update_iq_indices(struct octeon_device *oct, struct octeon_instr_queue *iq)
+/* Can only be called from process context */
+int
+octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
+ u32 pending_thresh, u32 napi_budget)
{
u32 inst_processed = 0;
+ u32 tot_inst_processed = 0;
+ int tx_done = 1;
- /* Calculate how many commands Octeon has read and move the read index
- * accordingly.
- */
- iq->octeon_read_index = oct->fn_list.update_iq_read_idx(oct, iq);
+ if (!spin_trylock(&iq->iq_flush_running_lock))
+ return tx_done;
- /* Move the NORESPONSE requests to the per-device completion list. */
- if (iq->flush_index != iq->octeon_read_index)
- inst_processed = lio_process_iq_request_list(oct, iq);
+ spin_lock_bh(&iq->lock);
- if (inst_processed) {
- atomic_sub(inst_processed, &iq->instr_pending);
- iq->stats.instr_processed += inst_processed;
- }
-}
+ iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq);
-static void
-octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
- u32 pending_thresh)
-{
if (atomic_read(&iq->instr_pending) >= (s32)pending_thresh) {
- spin_lock_bh(&iq->lock);
- update_iq_indices(oct, iq);
- spin_unlock_bh(&iq->lock);
+ do {
+ /* Process any outstanding IQ packets. */
+ if (iq->flush_index == iq->octeon_read_index)
+ break;
+
+ if (napi_budget)
+ inst_processed = lio_process_iq_request_list
+ (oct, iq,
+ napi_budget - tot_inst_processed);
+ else
+ inst_processed =
+ lio_process_iq_request_list(oct, iq, 0);
+
+ if (inst_processed) {
+ atomic_sub(inst_processed, &iq->instr_pending);
+ iq->stats.instr_processed += inst_processed;
+ }
+
+ tot_inst_processed += inst_processed;
+ inst_processed = 0;
+
+ } while (tot_inst_processed < napi_budget);
+
+ if (napi_budget && (tot_inst_processed >= napi_budget))
+ tx_done = 0;
}
+
+ iq->last_db_time = jiffies;
+
+ spin_unlock_bh(&iq->lock);
+
+ spin_unlock(&iq->iq_flush_running_lock);
+
+ return tx_done;
}
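
octeon_flush_iq() is now budget-aware: with a non-zero napi_budget it stops after roughly that many completions and returns 0 so the NAPI core schedules another poll; with a zero budget it drains the queue completely. The control flow, reduced to a stand-in process_batch() in place of lio_process_iq_request_list():

    struct ring;                                          /* opaque stand-in */
    static u32 process_batch(struct ring *r, u32 limit);  /* stand-in */

    /* Returns 1 when the ring was fully drained, 0 when the budget ran
     * out and the caller should poll again.
     */
    static int flush_budgeted(struct ring *r, u32 budget)
    {
        u32 done = 0, n;

        do {
            n = process_batch(r, budget ? budget - done : 0);
            if (!n)
                break;
            done += n;
        } while (!budget || done < budget);

        return !(budget && done >= budget);
    }
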
-static void __check_db_timeout(struct octeon_device *oct, unsigned long iq_no)
+/* Process instruction queue after timeout.
+ * This routine gets called from a workqueue or when removing the module.
+ */
+static void __check_db_timeout(struct octeon_device *oct, u64 iq_no)
{
struct octeon_instr_queue *iq;
u64 next_time;
@@ -481,24 +503,17 @@ static void __check_db_timeout(struct octeon_device *oct, unsigned long iq_no)
if (!iq)
return;
+ /* return immediately if no work is pending */
+ if (!atomic_read(&iq->instr_pending))
+ return;
/* If jiffies - last_db_time < db_timeout do nothing */
next_time = iq->last_db_time + iq->db_timeout;
if (!time_after(jiffies, (unsigned long)next_time))
return;
iq->last_db_time = jiffies;
- /* Get the lock and prevent tasklets. This routine gets called from
- * the poll thread. Instructions can now be posted in tasklet context
- */
- spin_lock_bh(&iq->lock);
- if (iq->fill_cnt != 0)
- ring_doorbell(oct, iq);
-
- spin_unlock_bh(&iq->lock);
-
/* Flush the instruction queue */
- if (iq->do_auto_flush)
- octeon_flush_iq(oct, iq, 1);
+ octeon_flush_iq(oct, iq, 1, 0);
}
/* Called by the Poll thread at regular intervals to check the instruction
@@ -508,11 +523,12 @@ static void check_db_timeout(struct work_struct *work)
{
struct cavium_wk *wk = (struct cavium_wk *)work;
struct octeon_device *oct = (struct octeon_device *)wk->ctxptr;
- unsigned long iq_no = wk->ctxul;
+ u64 iq_no = wk->ctxul;
struct cavium_wq *db_wq = &oct->check_db_wq[iq_no];
+ u32 delay = 10;
__check_db_timeout(oct, iq_no);
- queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));
+ queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(delay));
}
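
check_db_timeout() follows the usual self-rearming delayed-work shape: do one poll, then requeue yourself with the (now 10 ms) period. The skeleton, with illustrative names:

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    struct db_poller {
        struct workqueue_struct *wq;
        struct delayed_work dwork;
    };

    static void db_poll_fn(struct work_struct *work)
    {
        struct db_poller *p =
            container_of(work, struct db_poller, dwork.work);

        /* ... check doorbell / flush the queue here ... */
        queue_delayed_work(p->wq, &p->dwork, msecs_to_jiffies(10));
    }
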
int
@@ -523,9 +539,12 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no,
struct iq_post_status st;
struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
- spin_lock_bh(&iq->lock);
+ /* Get the lock and prevent other tasks and the tx interrupt handler
+ * from running.
+ */
+ spin_lock_bh(&iq->post_lock);
- st = __post_command2(oct, iq, force_db, cmd);
+ st = __post_command2(iq, cmd);
if (st.status != IQ_SEND_FAILED) {
octeon_report_sent_bytes_to_bql(buf, reqtype);
@@ -533,16 +552,19 @@ octeon_send_command(struct octeon_device *oct, u32 iq_no,
INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize);
INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1);
- if (iq->fill_cnt >= iq->fill_threshold || force_db)
+ if (force_db)
ring_doorbell(oct, iq);
} else {
INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1);
}
- spin_unlock_bh(&iq->lock);
+ spin_unlock_bh(&iq->post_lock);
- if (iq->do_auto_flush)
- octeon_flush_iq(oct, iq, 2);
+ /* The queue is flushed from the doorbell-timeout worker and the tx
+ * completion path, so no auto-flush is done here.
+ */
return st.status;
}
@@ -557,82 +579,75 @@ octeon_prepare_soft_command(struct octeon_device *oct,
u64 ossp1)
{
struct octeon_config *oct_cfg;
- struct octeon_instr_ih *ih;
+ struct octeon_instr_ih2 *ih2;
struct octeon_instr_irh *irh;
struct octeon_instr_rdp *rdp;
- BUG_ON(opcode > 15);
- BUG_ON(subcode > 127);
+ WARN_ON(opcode > 15);
+ WARN_ON(subcode > 127);
oct_cfg = octeon_get_conf(oct);
- ih = (struct octeon_instr_ih *)&sc->cmd.ih;
- ih->tagtype = ATOMIC_TAG;
- ih->tag = LIO_CONTROL;
- ih->raw = 1;
- ih->grp = CFG_GET_CTRL_Q_GRP(oct_cfg);
+ ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
+ ih2->tagtype = ATOMIC_TAG;
+ ih2->tag = LIO_CONTROL;
+ ih2->raw = 1;
+ ih2->grp = CFG_GET_CTRL_Q_GRP(oct_cfg);
if (sc->datasize) {
- ih->dlengsz = sc->datasize;
- ih->rs = 1;
+ ih2->dlengsz = sc->datasize;
+ ih2->rs = 1;
}
- irh = (struct octeon_instr_irh *)&sc->cmd.irh;
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
irh->opcode = opcode;
irh->subcode = subcode;
/* opcode/subcode specific parameters (ossp) */
irh->ossp = irh_ossp;
- sc->cmd.ossp[0] = ossp0;
- sc->cmd.ossp[1] = ossp1;
+ sc->cmd.cmd2.ossp[0] = ossp0;
+ sc->cmd.cmd2.ossp[1] = ossp1;
if (sc->rdatasize) {
- rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
rdp->pcie_port = oct->pcie_port;
rdp->rlen = sc->rdatasize;
irh->rflag = 1;
- irh->len = 4;
- ih->fsz = 40; /* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */
+ ih2->fsz = 40; /* irh+ossp[0]+ossp[1]+rdp+rptr = 40 bytes */
} else {
irh->rflag = 0;
- irh->len = 2;
- ih->fsz = 24; /* irh + ossp[0] + ossp[1] = 24 bytes */
+ ih2->fsz = 24; /* irh + ossp[0] + ossp[1] = 24 bytes */
}
-
- while (!(oct->io_qmask.iq & (1 << sc->iq_no)))
- sc->iq_no++;
}
int octeon_send_soft_command(struct octeon_device *oct,
struct octeon_soft_command *sc)
{
- struct octeon_instr_ih *ih;
+ struct octeon_instr_ih2 *ih2;
struct octeon_instr_irh *irh;
- struct octeon_instr_rdp *rdp;
+ u32 len;
- ih = (struct octeon_instr_ih *)&sc->cmd.ih;
- if (ih->dlengsz) {
- BUG_ON(!sc->dmadptr);
- sc->cmd.dptr = sc->dmadptr;
+ ih2 = (struct octeon_instr_ih2 *)&sc->cmd.cmd2.ih2;
+ if (ih2->dlengsz) {
+ WARN_ON(!sc->dmadptr);
+ sc->cmd.cmd2.dptr = sc->dmadptr;
}
-
- irh = (struct octeon_instr_irh *)&sc->cmd.irh;
+ irh = (struct octeon_instr_irh *)&sc->cmd.cmd2.irh;
if (irh->rflag) {
- BUG_ON(!sc->dmarptr);
- BUG_ON(!sc->status_word);
+ WARN_ON(!sc->dmarptr);
+ WARN_ON(!sc->status_word);
*sc->status_word = COMPLETION_WORD_INIT;
- rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
-
- sc->cmd.rptr = sc->dmarptr;
+ sc->cmd.cmd2.rptr = sc->dmarptr;
}
+ len = (u32)ih2->dlengsz;
if (sc->wait_time)
sc->timeout = jiffies + sc->wait_time;
- return octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
- (u32)ih->dlengsz, REQTYPE_SOFT_COMMAND);
+ return (octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
+ len, REQTYPE_SOFT_COMMAND));
}
int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
@@ -667,7 +682,7 @@ int octeon_free_sc_buffer_pool(struct octeon_device *oct)
struct list_head *tmp, *tmp2;
struct octeon_soft_command *sc;
- spin_lock(&oct->sc_buf_pool.lock);
+ spin_lock_bh(&oct->sc_buf_pool.lock);
list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) {
list_del(tmp);
@@ -679,7 +694,7 @@ int octeon_free_sc_buffer_pool(struct octeon_device *oct)
INIT_LIST_HEAD(&oct->sc_buf_pool.head);
- spin_unlock(&oct->sc_buf_pool.lock);
+ spin_unlock_bh(&oct->sc_buf_pool.lock);
return 0;
}
@@ -695,13 +710,13 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
struct octeon_soft_command *sc = NULL;
struct list_head *tmp;
- BUG_ON((offset + datasize + rdatasize + ctxsize) >
+ WARN_ON((offset + datasize + rdatasize + ctxsize) >
SOFT_COMMAND_BUFFER_SIZE);
- spin_lock(&oct->sc_buf_pool.lock);
+ spin_lock_bh(&oct->sc_buf_pool.lock);
if (list_empty(&oct->sc_buf_pool.head)) {
- spin_unlock(&oct->sc_buf_pool.lock);
+ spin_unlock_bh(&oct->sc_buf_pool.lock);
return NULL;
}
@@ -712,7 +727,7 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
atomic_inc(&oct->sc_buf_pool.alloc_buf_count);
- spin_unlock(&oct->sc_buf_pool.lock);
+ spin_unlock_bh(&oct->sc_buf_pool.lock);
sc = (struct octeon_soft_command *)tmp;
@@ -742,7 +757,7 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
offset = (offset + datasize + 127) & 0xffffff80;
if (rdatasize) {
- BUG_ON(rdatasize < 16);
+ WARN_ON(rdatasize < 16);
sc->virtrptr = (u8 *)sc + offset;
sc->dmarptr = dma_addr + offset;
sc->rdatasize = rdatasize;
@@ -755,11 +770,11 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
void octeon_free_soft_command(struct octeon_device *oct,
struct octeon_soft_command *sc)
{
- spin_lock(&oct->sc_buf_pool.lock);
+ spin_lock_bh(&oct->sc_buf_pool.lock);
list_add_tail(&sc->node, &oct->sc_buf_pool.head);
atomic_dec(&oct->sc_buf_pool.alloc_buf_count);
- spin_unlock(&oct->sc_buf_pool.lock);
+ spin_unlock_bh(&oct->sc_buf_pool.lock);
}
diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.c b/drivers/net/ethernet/cavium/liquidio/response_manager.c
index 091f537a946e..709049e36627 100644
--- a/drivers/net/ethernet/cavium/liquidio/response_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/response_manager.c
@@ -19,28 +19,14 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information
**********************************************************************/
-#include <linux/version.h>
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
-#include <linux/dma-mapping.h>
#include <linux/pci.h>
-#include <linux/kthread.h>
#include <linux/netdevice.h>
-#include "octeon_config.h"
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
-#include "octeon_nic.h"
#include "octeon_main.h"
-#include "octeon_network.h"
-#include "cn66xx_regs.h"
-#include "cn66xx_device.h"
-#include "cn68xx_regs.h"
-#include "cn68xx_device.h"
-#include "liquidio_image.h"
static void oct_poll_req_completion(struct work_struct *work);
@@ -54,8 +40,9 @@ int octeon_setup_response_list(struct octeon_device *oct)
spin_lock_init(&oct->response_list[i].lock);
atomic_set(&oct->response_list[i].pending_req_count, 0);
}
+ spin_lock_init(&oct->cmd_resp_wqlock);
- oct->dma_comp_wq.wq = create_workqueue("dma-comp");
+ oct->dma_comp_wq.wq = alloc_workqueue("dma-comp", WQ_MEM_RECLAIM, 0);
if (!oct->dma_comp_wq.wq) {
dev_err(&oct->pci_dev->dev, "failed to create wq thread\n");
return -ENOMEM;
@@ -64,7 +51,8 @@ int octeon_setup_response_list(struct octeon_device *oct)
cwq = &oct->dma_comp_wq;
INIT_DELAYED_WORK(&cwq->wk.work, oct_poll_req_completion);
cwq->wk.ctxptr = oct;
- queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(100));
+ oct->cmd_resp_state = OCT_DRV_ONLINE;
+ queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(50));
return ret;
}
@@ -72,7 +60,6 @@ int octeon_setup_response_list(struct octeon_device *oct)
void octeon_delete_response_list(struct octeon_device *oct)
{
cancel_delayed_work_sync(&oct->dma_comp_wq.wk.work);
- flush_workqueue(oct->dma_comp_wq.wq);
destroy_workqueue(oct->dma_comp_wq.wq);
}
@@ -86,6 +73,7 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev,
u32 status;
u64 status64;
struct octeon_instr_rdp *rdp;
+ u64 rptr;
ordered_sc_list = &octeon_dev->response_list[OCTEON_ORDERED_SC_LIST];
@@ -103,7 +91,8 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev,
sc = (struct octeon_soft_command *)ordered_sc_list->
head.next;
- rdp = (struct octeon_instr_rdp *)&sc->cmd.rdp;
+ rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
+ rptr = sc->cmd.cmd2.rptr;
status = OCTEON_REQUEST_PENDING;
@@ -111,7 +100,7 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev,
* to where rptr is pointing to
*/
dma_sync_single_for_cpu(&octeon_dev->pci_dev->dev,
- sc->cmd.rptr, rdp->rlen,
+ rptr, rdp->rlen,
DMA_FROM_DEVICE);
status64 = *sc->status_word;
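
The response buffer addressed by rptr belongs to the device until it overwrites the preset completion word, so the ordered-list poller syncs the buffer back to the CPU before every read. That check in isolation, with an illustrative sentinel value in place of COMPLETION_WORD_INIT:

    #include <linux/dma-mapping.h>

    #define COMPLETION_SENTINEL 0xffffffffffffffffULL    /* illustrative */

    static bool response_ready(struct device *dev, dma_addr_t rptr,
                               size_t rlen, const u64 *status_word)
    {
        dma_sync_single_for_cpu(dev, rptr, rlen, DMA_FROM_DEVICE);
        return *status_word != COMPLETION_SENTINEL;
    }
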
@@ -173,6 +162,5 @@ static void oct_poll_req_completion(struct work_struct *work)
struct cavium_wq *cwq = &oct->dma_comp_wq;
lio_process_ordered_list(oct, 0);
-
- queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(100));
+ queue_delayed_work(cwq->wq, &cwq->wk.work, msecs_to_jiffies(50));
}
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index c177c7cec13b..4ab404f45b21 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -146,7 +146,6 @@ struct octeon_mgmt {
struct device *dev;
struct napi_struct napi;
struct tasklet_struct tx_clean_tasklet;
- struct phy_device *phydev;
struct device_node *phy_np;
resource_size_t mix_phys;
resource_size_t mix_size;
@@ -787,14 +786,12 @@ static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
static int octeon_mgmt_ioctl(struct net_device *netdev,
struct ifreq *rq, int cmd)
{
- struct octeon_mgmt *p = netdev_priv(netdev);
-
switch (cmd) {
case SIOCSHWTSTAMP:
return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd);
default:
- if (p->phydev)
- return phy_mii_ioctl(p->phydev, rq, cmd);
+ if (netdev->phydev)
+ return phy_mii_ioctl(netdev->phydev, rq, cmd);
return -EINVAL;
}
}
@@ -836,16 +833,18 @@ static void octeon_mgmt_enable_link(struct octeon_mgmt *p)
static void octeon_mgmt_update_link(struct octeon_mgmt *p)
{
+ struct net_device *ndev = p->netdev;
+ struct phy_device *phydev = ndev->phydev;
union cvmx_agl_gmx_prtx_cfg prtx_cfg;
prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
- if (!p->phydev->link)
+ if (!phydev->link)
prtx_cfg.s.duplex = 1;
else
- prtx_cfg.s.duplex = p->phydev->duplex;
+ prtx_cfg.s.duplex = phydev->duplex;
- switch (p->phydev->speed) {
+ switch (phydev->speed) {
case 10:
prtx_cfg.s.speed = 0;
prtx_cfg.s.slottime = 0;
@@ -871,7 +870,7 @@ static void octeon_mgmt_update_link(struct octeon_mgmt *p)
prtx_cfg.s.speed_msb = 0;
/* Only matters for half-duplex */
prtx_cfg.s.slottime = 1;
- prtx_cfg.s.burst = p->phydev->duplex;
+ prtx_cfg.s.burst = phydev->duplex;
}
break;
case 0: /* No link */
@@ -894,9 +893,9 @@ static void octeon_mgmt_update_link(struct octeon_mgmt *p)
/* MII (both speeds) and RGMII 1000 speed. */
agl_clk.s.clk_cnt = 1;
if (prtx_ctl.s.mode == 0) { /* RGMII mode */
- if (p->phydev->speed == 10)
+ if (phydev->speed == 10)
agl_clk.s.clk_cnt = 50;
- else if (p->phydev->speed == 100)
+ else if (phydev->speed == 100)
agl_clk.s.clk_cnt = 5;
}
cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64);
@@ -906,39 +905,40 @@ static void octeon_mgmt_update_link(struct octeon_mgmt *p)
static void octeon_mgmt_adjust_link(struct net_device *netdev)
{
struct octeon_mgmt *p = netdev_priv(netdev);
+ struct phy_device *phydev = netdev->phydev;
unsigned long flags;
int link_changed = 0;
- if (!p->phydev)
+ if (!phydev)
return;
spin_lock_irqsave(&p->lock, flags);
- if (!p->phydev->link && p->last_link)
+ if (!phydev->link && p->last_link)
link_changed = -1;
- if (p->phydev->link
- && (p->last_duplex != p->phydev->duplex
- || p->last_link != p->phydev->link
- || p->last_speed != p->phydev->speed)) {
+ if (phydev->link &&
+ (p->last_duplex != phydev->duplex ||
+ p->last_link != phydev->link ||
+ p->last_speed != phydev->speed)) {
octeon_mgmt_disable_link(p);
link_changed = 1;
octeon_mgmt_update_link(p);
octeon_mgmt_enable_link(p);
}
- p->last_link = p->phydev->link;
- p->last_speed = p->phydev->speed;
- p->last_duplex = p->phydev->duplex;
+ p->last_link = phydev->link;
+ p->last_speed = phydev->speed;
+ p->last_duplex = phydev->duplex;
spin_unlock_irqrestore(&p->lock, flags);
if (link_changed != 0) {
if (link_changed > 0) {
pr_info("%s: Link is up - %d/%s\n", netdev->name,
- p->phydev->speed,
- DUPLEX_FULL == p->phydev->duplex ?
+ phydev->speed,
+ phydev->duplex == DUPLEX_FULL ?
"Full" : "Half");
} else {
pr_info("%s: Link is down\n", netdev->name);
@@ -949,6 +949,7 @@ static void octeon_mgmt_adjust_link(struct net_device *netdev)
static int octeon_mgmt_init_phy(struct net_device *netdev)
{
struct octeon_mgmt *p = netdev_priv(netdev);
+ struct phy_device *phydev = NULL;
if (octeon_is_simulation() || p->phy_np == NULL) {
/* No PHYs in the simulator. */
@@ -956,11 +957,11 @@ static int octeon_mgmt_init_phy(struct net_device *netdev)
return 0;
}
- p->phydev = of_phy_connect(netdev, p->phy_np,
- octeon_mgmt_adjust_link, 0,
- PHY_INTERFACE_MODE_MII);
+ phydev = of_phy_connect(netdev, p->phy_np,
+ octeon_mgmt_adjust_link, 0,
+ PHY_INTERFACE_MODE_MII);
- if (!p->phydev)
+ if (!phydev)
return -ENODEV;
return 0;
@@ -1080,9 +1081,9 @@ static int octeon_mgmt_open(struct net_device *netdev)
}
/* Set the mode of the interface, RGMII/MII. */
- if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && p->phydev) {
+ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && netdev->phydev) {
union cvmx_agl_prtx_ctl agl_prtx_ctl;
- int rgmii_mode = (p->phydev->supported &
+ int rgmii_mode = (netdev->phydev->supported &
(SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0;
agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
@@ -1205,7 +1206,7 @@ static int octeon_mgmt_open(struct net_device *netdev)
/* Configure the port duplex, speed and enables */
octeon_mgmt_disable_link(p);
- if (p->phydev)
+ if (netdev->phydev)
octeon_mgmt_update_link(p);
octeon_mgmt_enable_link(p);
@@ -1214,9 +1215,9 @@ static int octeon_mgmt_open(struct net_device *netdev)
/* PHY is not present in simulator. The carrier is enabled
* while initializing the phy for simulator, leave it enabled.
*/
- if (p->phydev) {
+ if (netdev->phydev) {
netif_carrier_off(netdev);
- phy_start_aneg(p->phydev);
+ phy_start_aneg(netdev->phydev);
}
netif_wake_queue(netdev);
@@ -1244,9 +1245,8 @@ static int octeon_mgmt_stop(struct net_device *netdev)
napi_disable(&p->napi);
netif_stop_queue(netdev);
- if (p->phydev)
- phy_disconnect(p->phydev);
- p->phydev = NULL;
+ if (netdev->phydev)
+ phy_disconnect(netdev->phydev);
netif_carrier_off(netdev);
@@ -1320,7 +1320,7 @@ static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Ring the bell. */
cvmx_write_csr(p->mix + MIX_ORING2, 1);
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
rv = NETDEV_TX_OK;
out:
octeon_mgmt_update_tx_stats(netdev);
@@ -1346,50 +1346,23 @@ static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
}
-static int octeon_mgmt_get_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
-{
- struct octeon_mgmt *p = netdev_priv(netdev);
-
- if (p->phydev)
- return phy_ethtool_gset(p->phydev, cmd);
-
- return -EOPNOTSUPP;
-}
-
-static int octeon_mgmt_set_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
-{
- struct octeon_mgmt *p = netdev_priv(netdev);
-
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- if (p->phydev)
- return phy_ethtool_sset(p->phydev, cmd);
-
- return -EOPNOTSUPP;
-}
-
static int octeon_mgmt_nway_reset(struct net_device *dev)
{
- struct octeon_mgmt *p = netdev_priv(dev);
-
if (!capable(CAP_NET_ADMIN))
return -EPERM;
- if (p->phydev)
- return phy_start_aneg(p->phydev);
+ if (dev->phydev)
+ return phy_start_aneg(dev->phydev);
return -EOPNOTSUPP;
}
static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
.get_drvinfo = octeon_mgmt_get_drvinfo,
- .get_settings = octeon_mgmt_get_settings,
- .set_settings = octeon_mgmt_set_settings,
.nway_reset = octeon_mgmt_nway_reset,
.get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
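
With the PHY pointer kept in net_device rather than the private struct, the generic phylib ksettings helpers can be wired into ethtool_ops directly, and the connect/disconnect lifecycle is what keeps ndev->phydev valid. A sketch of that lifecycle, assuming a driver-supplied adjust-link callback:

    #include <linux/of_mdio.h>
    #include <linux/phy.h>

    static int example_attach_phy(struct net_device *ndev,
                                  struct device_node *phy_np,
                                  void (*adjust)(struct net_device *))
    {
        struct phy_device *phydev;

        /* Stores the PHY in ndev->phydev as a side effect. */
        phydev = of_phy_connect(ndev, phy_np, adjust, 0,
                                PHY_INTERFACE_MODE_MII);
        if (!phydev)
            return -ENODEV;
        phy_start_aneg(phydev);
        return 0;
    }

    static void example_detach_phy(struct net_device *ndev)
    {
        if (ndev->phydev)
            phy_disconnect(ndev->phydev);  /* also clears ndev->phydev */
    }
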
static const struct net_device_ops octeon_mgmt_ops = {
@@ -1540,6 +1513,7 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
return 0;
err:
+ of_node_put(p->phy_np);
free_netdev(netdev);
return result;
}
@@ -1547,8 +1521,10 @@ err:
static int octeon_mgmt_remove(struct platform_device *pdev)
{
struct net_device *netdev = platform_get_drvdata(pdev);
+ struct octeon_mgmt *p = netdev_priv(netdev);
unregister_netdev(netdev);
+ of_node_put(p->phy_np);
free_netdev(netdev);
return 0;
}
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 83025bb4737c..e29815d9e6f4 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -279,6 +279,7 @@ struct nicvf {
u8 sqs_id;
bool sqs_mode;
bool hw_tso;
+ bool t88;
/* Receive buffer alloc */
u32 rb_page_offset;
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 95f17f8cadac..85cc782b9060 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -251,9 +251,14 @@ static void nic_set_tx_pkt_pad(struct nicpf *nic, int size)
int lmac;
u64 lmac_cfg;
- /* Max value that can be set is 60 */
- if (size > 60)
- size = 60;
+ /* There is an issue in HW wherein, while sending GSO-sized pkts as
+ * part of TSO, if the pkt len falls below this size the NIC will
+ * zero-pad the packet and also update the IP total length. Hence set
+ * this value to less than the min pkt size of MAC+IP+TCP headers;
+ * BGX will do the padding to transmit a 64 byte pkt.
+ */
+ if (size > 52)
+ size = 52;
for (lmac = 0; lmac < (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX); lmac++) {
lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
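
Worked out, the 52-byte cap follows from header arithmetic: a minimal Ethernet + IPv4 + TCP header stack is 14 + 20 + 20 = 54 bytes, so a threshold of 52 stays strictly below any TSO segment that still carries those headers, and BGX pads the frame up to the 64-byte wire minimum instead.
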
@@ -499,6 +504,7 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
u32 rr_quantum;
u8 sq_idx = sq->sq_num;
u8 pqs_vnic;
+ int svf;
if (sq->sqs_mode)
pqs_vnic = nic->pqs_vf[vnic];
@@ -511,10 +517,19 @@ static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
/* 24 bytes for FCS, IPG and preamble */
rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);
- tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
+ if (!sq->sqs_mode) {
+ tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
+ } else {
+ for (svf = 0; svf < MAX_SQS_PER_VF; svf++) {
+ if (nic->vf_sqs[pqs_vnic][svf] == vnic)
+ break;
+ }
+ tl4 = (MAX_LMAC_PER_BGX * NIC_TL4_PER_LMAC);
+ tl4 += (lmac * NIC_TL4_PER_LMAC * MAX_SQS_PER_VF);
+ tl4 += (svf * NIC_TL4_PER_LMAC);
+ tl4 += (bgx * NIC_TL4_PER_BGX);
+ }
tl4 += sq_idx;
- if (sq->sqs_mode)
- tl4 += vnic * 8;
tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3);
nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
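
The secondary-qset branch above replaces the old flat `tl4 += vnic * 8` offset with a region indexed by (lmac, svf). Restated as a pure function with the macros treated as opaque sizing constants (a sketch of the same arithmetic, not driver code):

    static u32 tl4_index(int sqs_mode, u32 bgx, u32 lmac, u32 svf,
                         u32 sq_idx, u32 tl4_per_lmac, u32 tl4_per_bgx,
                         u32 lmac_per_bgx, u32 sqs_per_vf)
    {
        u32 tl4;

        if (!sqs_mode)
            tl4 = lmac * tl4_per_lmac + bgx * tl4_per_bgx;
        else
            tl4 = lmac_per_bgx * tl4_per_lmac +
                  lmac * tl4_per_lmac * sqs_per_vf +
                  svf * tl4_per_lmac +
                  bgx * tl4_per_bgx;

        return tl4 + sq_idx;
    }
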
diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h
index afb10e326b4f..fab35a593898 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_reg.h
+++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h
@@ -170,7 +170,6 @@
#define NIC_QSET_SQ_0_7_DOOR (0x010838)
#define NIC_QSET_SQ_0_7_STATUS (0x010840)
#define NIC_QSET_SQ_0_7_DEBUG (0x010848)
-#define NIC_QSET_SQ_0_7_CNM_CHG (0x010860)
#define NIC_QSET_SQ_0_7_STAT_0_1 (0x010900)
#define NIC_QSET_RBDR_0_1_CFG (0x010C00)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index d2d8ef270142..ad4fddb55421 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -382,7 +382,10 @@ static void nicvf_get_regs(struct net_device *dev,
p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DOOR, q);
p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS, q);
p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DEBUG, q);
- p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CNM_CHG, q);
+ /* Padding, was NIC_QSET_SQ_0_7_CNM_CHG, which
+ * produces bus errors when read
+ */
+ p[i++] = 0;
p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1, q);
reg_offset = NIC_QSET_SQ_0_7_STAT_0_1 | (1 << 3);
p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index bfee298fc02a..3240349615bd 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -513,6 +513,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
struct nicvf *nic = netdev_priv(netdev);
struct snd_queue *sq;
struct sq_hdr_subdesc *hdr;
+ struct sq_hdr_subdesc *tso_sqe;
sq = &nic->qs->sq[cqe_tx->sq_idx];
@@ -527,17 +528,21 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
- /* For TSO offloaded packets only one SQE will have a valid SKB */
if (skb) {
+ /* Check for dummy descriptor used for HW TSO offload on 88xx */
+ if (hdr->dont_send) {
+ /* Get actual TSO descriptors and free them */
+ tso_sqe =
+ (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
+ nicvf_put_sq_desc(sq, tso_sqe->subdesc_cnt + 1);
+ }
nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
prefetch(skb);
dev_consume_skb_any(skb);
sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
} else {
- /* In case of HW TSO, HW sends a CQE for each segment of a TSO
- * packet instead of a single CQE for the whole TSO packet
- * transmitted. Each of this CQE points to the same SQE, so
- * avoid freeing same SQE multiple times.
+ /* In case of SW TSO on 88xx, only last segment will have
+ * a SKB attached, so just free SQEs here.
*/
if (!nic->hw_tso)
nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
@@ -1442,7 +1447,7 @@ static void nicvf_reset_task(struct work_struct *work)
nicvf_stop(nic->netdev);
nicvf_open(nic->netdev);
- nic->netdev->trans_start = jiffies;
+ netif_trans_update(nic->netdev);
}
static int nicvf_config_loopback(struct nicvf *nic,
@@ -1502,6 +1507,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct net_device *netdev;
struct nicvf *nic;
int err, qcount;
+ u16 sdevid;
err = pci_enable_device(pdev);
if (err) {
@@ -1575,6 +1581,10 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!pass1_silicon(nic->pdev))
nic->hw_tso = true;
+ pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
+ if (sdevid == 0xA134)
+ nic->t88 = true;
+
/* Check if this VF is in QS only mode */
if (nic->sqs_mode)
return 0;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 06b819db51b1..dda3ea3f3bb6 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -23,7 +23,7 @@ static void nicvf_get_page(struct nicvf *nic)
if (!nic->rb_pageref || !nic->rb_page)
return;
- atomic_add(nic->rb_pageref, &nic->rb_page->_count);
+ page_ref_add(nic->rb_page, nic->rb_pageref);
nic->rb_pageref = 0;
}
@@ -938,6 +938,8 @@ static int nicvf_tso_count_subdescs(struct sk_buff *skb)
return num_edescs + sh->gso_segs;
}
+#define POST_CQE_DESC_COUNT 2
+
/* Get the number of SQ descriptors needed to xmit this skb */
static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
{
@@ -948,6 +950,10 @@ static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
return subdesc_cnt;
}
+ /* Dummy descriptors to get TSO pkt completion notification */
+ if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size)
+ subdesc_cnt += POST_CQE_DESC_COUNT;
+
if (skb_shinfo(skb)->nr_frags)
subdesc_cnt += skb_shinfo(skb)->nr_frags;
@@ -965,14 +971,21 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
struct sq_hdr_subdesc *hdr;
hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
- sq->skbuff[qentry] = (u64)skb;
-
memset(hdr, 0, SND_QUEUE_DESC_SIZE);
hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
- /* Enable notification via CQE after processing SQE */
- hdr->post_cqe = 1;
- /* No of subdescriptors following this */
- hdr->subdesc_cnt = subdesc_cnt;
+
+ if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size) {
+ /* post_cqe = 0, to avoid HW posting a CQE for every TSO
+ * segment transmitted on 88xx.
+ */
+ hdr->subdesc_cnt = subdesc_cnt - POST_CQE_DESC_COUNT;
+ } else {
+ sq->skbuff[qentry] = (u64)skb;
+ /* Enable notification via CQE after processing SQE */
+ hdr->post_cqe = 1;
+ /* No of subdescriptors following this */
+ hdr->subdesc_cnt = subdesc_cnt;
+ }
hdr->tot_len = len;
/* Offload checksum calculation to HW */
@@ -1023,6 +1036,37 @@ static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
gather->addr = data;
}
+/* Add HDR + IMMEDIATE subdescriptors right after the descriptors of a TSO
+ * packet so that a CQE is posted as a notification that the TSO packet
+ * has been transmitted.
+ */
+ */
+static inline void nicvf_sq_add_cqe_subdesc(struct snd_queue *sq, int qentry,
+ int tso_sqe, struct sk_buff *skb)
+{
+ struct sq_imm_subdesc *imm;
+ struct sq_hdr_subdesc *hdr;
+
+ sq->skbuff[qentry] = (u64)skb;
+
+ hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
+ memset(hdr, 0, SND_QUEUE_DESC_SIZE);
+ hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
+ /* Enable notification via CQE after processing SQE */
+ hdr->post_cqe = 1;
+ /* There is no packet to transmit here */
+ hdr->dont_send = 1;
+ hdr->subdesc_cnt = POST_CQE_DESC_COUNT - 1;
+ hdr->tot_len = 1;
+ /* Actual TSO header SQE index, needed for cleanup */
+ hdr->rsvd2 = tso_sqe;
+
+ qentry = nicvf_get_nxt_sqentry(sq, qentry);
+ imm = (struct sq_imm_subdesc *)GET_SQ_DESC(sq, qentry);
+ memset(imm, 0, SND_QUEUE_DESC_SIZE);
+ imm->subdesc_type = SQ_DESC_TYPE_IMMEDIATE;
+ imm->len = 1;
+}
+
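
The 88xx workaround hinges on one completion per TSO send: the real TSO header is posted with post_cqe left clear, and the trailing HDR+IMMEDIATE pair above carries post_cqe=1, dont_send=1, and the real header's SQE index in rsvd2 so nicvf_snd_pkt_handler() can free both descriptor chains. A host-side model of that pairing, with a simplified stand-in descriptor layout:

    struct model_hdr {
        unsigned int post_cqe:1;     /* raise a CQE when processed */
        unsigned int dont_send:1;    /* carries no payload to transmit */
        unsigned int subdesc_cnt:8;  /* subdescriptors following this one */
        unsigned int rsvd2:16;       /* here: index of the real header */
    };

    static void post_tso_with_dummy(struct model_hdr *ring,
                                    unsigned int tso_idx,
                                    unsigned int dummy_idx,
                                    unsigned int tso_subdescs)
    {
        ring[tso_idx] = (struct model_hdr){
            .post_cqe = 0,               /* no CQE per TSO segment */
            .subdesc_cnt = tso_subdescs,
        };
        ring[dummy_idx] = (struct model_hdr){
            .post_cqe = 1,               /* the single completion */
            .dont_send = 1,
            .subdesc_cnt = 1,            /* the IMMEDIATE that follows */
            .rsvd2 = tso_idx,            /* where cleanup starts */
        };
    }
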
/* Segment a TSO packet into 'gso_size' segments and append
* them to SQ for transfer
*/
@@ -1096,7 +1140,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
{
int i, size;
- int subdesc_cnt;
+ int subdesc_cnt, tso_sqe = 0;
int sq_num, qentry;
struct queue_set *qs;
struct snd_queue *sq;
@@ -1131,6 +1175,7 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
/* Add SQ header subdesc */
nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
skb, skb->len);
+ tso_sqe = qentry;
/* Add SQ gather subdescs */
qentry = nicvf_get_nxt_sqentry(sq, qentry);
@@ -1154,6 +1199,11 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
}
doorbell:
+ if (nic->t88 && skb_shinfo(skb)->gso_size) {
+ qentry = nicvf_get_nxt_sqentry(sq, qentry);
+ nicvf_sq_add_cqe_subdesc(sq, qentry, tso_sqe, skb);
+ }
+
/* make sure all memory stores are done before ringing doorbell */
smp_wmb();
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index d20539a6d162..63a39ac97d53 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -274,12 +274,14 @@ static void bgx_sgmii_change_link_state(struct lmac *lmac)
static void bgx_lmac_handler(struct net_device *netdev)
{
struct lmac *lmac = container_of(netdev, struct lmac, netdev);
- struct phy_device *phydev = lmac->phydev;
+ struct phy_device *phydev;
int link_changed = 0;
if (!lmac)
return;
+ phydev = lmac->phydev;
+
if (!phydev->link && lmac->last_link)
link_changed = -1;
@@ -549,7 +551,9 @@ static int bgx_xaui_check_link(struct lmac *lmac)
}
/* Clear rcvflt bit (latching high) and read it back */
- bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
+ if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT)
+ bgx_reg_modify(bgx, lmacid,
+ BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
if (bgx->use_training) {
@@ -568,13 +572,6 @@ static int bgx_xaui_check_link(struct lmac *lmac)
return -1;
}
- /* Wait for MAC RX to be ready */
- if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
- SMU_RX_CTL_STATUS, true)) {
- dev_err(&bgx->pdev->dev, "SMU RX link not okay\n");
- return -1;
- }
-
/* Wait for BGX RX to be idle */
if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
dev_err(&bgx->pdev->dev, "SMU RX not idle\n");
@@ -587,29 +584,30 @@ static int bgx_xaui_check_link(struct lmac *lmac)
return -1;
}
- if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
- dev_err(&bgx->pdev->dev, "Receive fault\n");
- return -1;
- }
-
- /* Receive link is latching low. Force it high and verify it */
- bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
- if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
- SPU_STATUS1_RCV_LNK, false)) {
- dev_err(&bgx->pdev->dev, "SPU receive link down\n");
- return -1;
- }
-
+ /* Clear receive packet disable */
cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
cfg &= ~SPU_MISC_CTL_RX_DIS;
bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
- return 0;
+
+ /* Check for MAC RX faults */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_CTL);
+ /* 0 - Link is okay, 1 - Local fault, 2 - Remote fault */
+ cfg &= SMU_RX_CTL_STATUS;
+ if (!cfg)
+ return 0;
+
+ /* Rx local/remote fault seen.
+ * Reinit the lmac to see if the condition recovers
+ */
+ bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type);
+
+ return -1;
}
static void bgx_poll_for_link(struct work_struct *work)
{
struct lmac *lmac;
- u64 link;
+ u64 spu_link, smu_link;
lmac = container_of(work, struct lmac, dwork.work);
@@ -619,8 +617,11 @@ static void bgx_poll_for_link(struct work_struct *work)
bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
SPU_STATUS1_RCV_LNK, false);
- link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
- if (link & SPU_STATUS1_RCV_LNK) {
+ spu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
+ smu_link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SMUX_RX_CTL);
+
+ if ((spu_link & SPU_STATUS1_RCV_LNK) &&
+ !(smu_link & SMU_RX_CTL_STATUS)) {
lmac->link_up = 1;
if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
lmac->last_speed = 40000;
@@ -634,9 +635,15 @@ static void bgx_poll_for_link(struct work_struct *work)
}
if (lmac->last_link != lmac->link_up) {
+ if (lmac->link_up) {
+ if (bgx_xaui_check_link(lmac)) {
+ /* Errors, clear link_up state */
+ lmac->link_up = 0;
+ lmac->last_speed = SPEED_UNKNOWN;
+ lmac->last_duplex = DUPLEX_UNKNOWN;
+ }
+ }
lmac->last_link = lmac->link_up;
- if (lmac->link_up)
- bgx_xaui_check_link(lmac);
}
queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
@@ -708,7 +715,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
{
struct lmac *lmac;
- u64 cmrx_cfg;
+ u64 cfg;
lmac = &bgx->lmac[lmacid];
if (lmac->check_link) {
@@ -717,9 +724,33 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
destroy_workqueue(lmac->check_link);
}
- cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
- cmrx_cfg &= ~(1 << 15);
- bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
+ /* Disable packet reception */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cfg &= ~CMR_PKT_RX_EN;
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+ /* Give the Rx/Tx FIFOs a chance to drain */
+ bgx_poll_reg(bgx, lmacid, BGX_CMRX_RX_FIFO_LEN, (u64)0x1FFF, true);
+ bgx_poll_reg(bgx, lmacid, BGX_CMRX_TX_FIFO_LEN, (u64)0x3FFF, true);
+
+ /* Disable packet transmission */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cfg &= ~CMR_PKT_TX_EN;
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+ /* Disable serdes lanes */
+ if (!lmac->is_sgmii)
+ bgx_reg_modify(bgx, lmacid,
+ BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
+ else
+ bgx_reg_modify(bgx, lmacid,
+ BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_PWR_DN);
+
+ /* Disable LMAC */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cfg &= ~CMR_EN;
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
bgx_flush_dmac_addrs(bgx, lmacid);
if ((bgx->lmac_type != BGX_MODE_XFI) &&
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index 149e179363a1..42010d2e5ddf 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -41,6 +41,7 @@
#define BGX_CMRX_RX_STAT10 0xC0
#define BGX_CMRX_RX_BP_DROP 0xC8
#define BGX_CMRX_RX_DMAC_CTL 0x0E8
+#define BGX_CMRX_RX_FIFO_LEN 0x108
#define BGX_CMR_RX_DMACX_CAM 0x200
#define RX_DMACX_CAM_EN BIT_ULL(48)
#define RX_DMACX_CAM_LMACID(x) (x << 49)
@@ -50,6 +51,7 @@
#define BGX_CMR_CHAN_MSK_AND 0x450
#define BGX_CMR_BIST_STATUS 0x460
#define BGX_CMR_RX_LMACS 0x468
+#define BGX_CMRX_TX_FIFO_LEN 0x518
#define BGX_CMRX_TX_STAT0 0x600
#define BGX_CMRX_TX_STAT1 0x608
#define BGX_CMRX_TX_STAT2 0x610
diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig
index 4686a85a8a22..5713e83be08c 100644
--- a/drivers/net/ethernet/chelsio/Kconfig
+++ b/drivers/net/ethernet/chelsio/Kconfig
@@ -96,17 +96,6 @@ config CHELSIO_T4_DCB
If unsure, say N.
-config CHELSIO_T4_UWIRE
- bool "Unified Wire Support for Chelsio T5 cards"
- default n
- depends on CHELSIO_T4
- ---help---
- Enable unified-wire offload features.
- Say Y here if you want to enable unified-wire over Ethernet
- in the driver.
-
- If unsure, say N.
-
config CHELSIO_T4_FCOE
bool "Fibre Channel over Ethernet (FCoE) Support for Chelsio T5 cards"
default n
@@ -137,4 +126,9 @@ config CHELSIO_T4VF
To compile this driver as a module choose M here; the module
will be called cxgb4vf.
+config CHELSIO_LIB
+ tristate
+ ---help---
+ Common library for Chelsio drivers.
+
endif # NET_VENDOR_CHELSIO
diff --git a/drivers/net/ethernet/chelsio/Makefile b/drivers/net/ethernet/chelsio/Makefile
index 390510b5e90f..b6a5eec6ed8e 100644
--- a/drivers/net/ethernet/chelsio/Makefile
+++ b/drivers/net/ethernet/chelsio/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_CHELSIO_T1) += cxgb/
obj-$(CONFIG_CHELSIO_T3) += cxgb3/
obj-$(CONFIG_CHELSIO_T4) += cxgb4/
obj-$(CONFIG_CHELSIO_T4VF) += cxgb4vf/
+obj-$(CONFIG_CHELSIO_LIB) += libcxgb/
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index 526ea74e82d9..86f467a2c485 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -1664,8 +1664,7 @@ static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
struct cmdQ *q = &sge->cmdQ[qid];
unsigned int credits, pidx, genbit, count, use_sched_skb = 0;
- if (!spin_trylock(&q->lock))
- return NETDEV_TX_LOCKED;
+ spin_lock(&q->lock);
reclaim_completed_tx(sge, q);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/Makefile b/drivers/net/ethernet/chelsio/cxgb4/Makefile
index 85c92821b239..ace0ab98d0f1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/Makefile
+++ b/drivers/net/ethernet/chelsio/cxgb4/Makefile
@@ -7,5 +7,4 @@ obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o
cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o
-cxgb4-$(CONFIG_CHELSIO_T4_UWIRE) += cxgb4_ppm.o
cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 326d4009525e..edd23386b47d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -324,7 +324,9 @@ struct adapter_params {
unsigned int sf_fw_start; /* start of FW image in flash */
unsigned int fw_vers;
+ unsigned int bs_vers; /* bootstrap version */
unsigned int tp_vers;
+ unsigned int er_vers; /* expansion ROM version */
u8 api_vers[7];
unsigned short mtus[NMTUS];
@@ -357,6 +359,34 @@ struct sge_idma_monitor_state {
unsigned int idma_warn[2]; /* time to warning in HZ */
};
+/* Firmware Mailbox Command/Reply log. All values are in Host-Endian format.
+ * The access and execute times are signed in order to accommodate negative
+ * error returns.
+ */
+struct mbox_cmd {
+ u64 cmd[MBOX_LEN / 8]; /* a Firmware Mailbox Command/Reply */
+ u64 timestamp; /* OS-dependent timestamp */
+ u32 seqno; /* sequence number */
+ s16 access; /* time (ms) to access mailbox */
+ s16 execute; /* time (ms) to execute */
+};
+
+struct mbox_cmd_log {
+ unsigned int size; /* number of entries in the log */
+ unsigned int cursor; /* next position in the log to write */
+ u32 seqno; /* next sequence number */
+ /* variable length mailbox command log starts here */
+};
+
+/* Given a pointer to a Firmware Mailbox Command Log and a log entry index,
+ * return a pointer to the specified entry.
+ */
+static inline struct mbox_cmd *mbox_cmd_log_entry(struct mbox_cmd_log *log,
+ unsigned int entry_idx)
+{
+ return &((struct mbox_cmd *)&(log)[1])[entry_idx];
+}
+
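
mbox_cmd_log_entry() indexes past the fixed header into a variable-length array of entries, so a log is allocated as header plus `size` entries in one block (T4_OS_LOG_MBOX_CMDS entries in this driver). A usage sketch; the allocation and wrap-around write shown here are assumptions implied by the cursor/seqno fields, not code from this patch:

    #include <linux/slab.h>

    static struct mbox_cmd_log *mbox_log_alloc(unsigned int size)
    {
        struct mbox_cmd_log *log;

        log = kzalloc(sizeof(*log) + size * sizeof(struct mbox_cmd),
                      GFP_KERNEL);
        if (log)
            log->size = size;  /* cursor/seqno start at 0 from kzalloc */
        return log;
    }

    /* Ring-style write: the entry index wraps at log->size. */
    static struct mbox_cmd *mbox_log_next(struct mbox_cmd_log *log)
    {
        struct mbox_cmd *entry = mbox_cmd_log_entry(log, log->cursor);

        log->cursor = (log->cursor + 1) % log->size;
        log->seqno++;
        return entry;
    }
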
#include "t4fw_api.h"
#define FW_VERSION(chip) ( \
@@ -388,12 +418,14 @@ struct trace_params {
struct link_config {
unsigned short supported; /* link capabilities */
unsigned short advertising; /* advertised capabilities */
- unsigned short requested_speed; /* speed user has requested */
- unsigned short speed; /* actual link speed */
+ unsigned short lp_advertising; /* peer advertised capabilities */
+ unsigned int requested_speed; /* speed user has requested */
+ unsigned int speed; /* actual link speed */
unsigned char requested_fc; /* flow control user has requested */
unsigned char fc; /* actual link flow control */
unsigned char autoneg; /* autonegotiating? */
unsigned char link_ok; /* link up? */
+ unsigned char link_down_rc; /* link down reason */
};
#define FW_LEN16(fw_struct) FW_CMD_LEN16_V(sizeof(fw_struct) / 16)
@@ -731,6 +763,7 @@ struct adapter {
u32 t4_bar0;
struct pci_dev *pdev;
struct device *pdev_dev;
+ const char *name;
unsigned int mbox;
unsigned int pf;
unsigned int flags;
@@ -776,6 +809,10 @@ struct adapter {
struct work_struct db_drop_task;
bool tid_release_task_busy;
+ /* support for mailbox command/reply logging */
+#define T4_OS_LOG_MBOX_CMDS 256
+ struct mbox_cmd_log *mbox_log;
+
struct dentry *debugfs_root;
bool use_bd; /* Use SGE Back Door intfc for reading SGE Contexts */
bool trace_rss; /* 1 implies that different RSS flit per filter is
@@ -1306,6 +1343,7 @@ int t4_fl_pkt_align(struct adapter *adap);
unsigned int t4_flash_cfg_addr(struct adapter *adapter);
int t4_check_fw_version(struct adapter *adap);
int t4_get_fw_version(struct adapter *adapter, u32 *vers);
+int t4_get_bs_version(struct adapter *adapter, u32 *vers);
int t4_get_tp_version(struct adapter *adapter, u32 *vers);
int t4_get_exprom_version(struct adapter *adapter, u32 *vers);
int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
@@ -1329,6 +1367,8 @@ int t4_init_sge_params(struct adapter *adapter);
int t4_init_tp_params(struct adapter *adap);
int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
int t4_init_rss_mode(struct adapter *adap, int mbox);
+int t4_init_portinfo(struct port_info *pi, int mbox,
+ int port, int pf, int vf, u8 mac[]);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
void t4_fatal_err(struct adapter *adapter);
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
@@ -1464,6 +1504,7 @@ int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
unsigned int vf, unsigned int eqid);
int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox);
+void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl);
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
void t4_db_full(struct adapter *adapter);
void t4_db_dropped(struct adapter *adapter);
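
The log is allocated as a single blob: the fixed struct mbox_cmd_log header followed immediately by size struct mbox_cmd entries, which is what mbox_cmd_log_entry()'s &log[1] pointer arithmetic relies on. A minimal user-space sketch of the same layout and wrap-around indexing (field names match the header above; the allocation mirrors what init_one() does later in this series):

	#include <stdint.h>
	#include <stdlib.h>

	#define MBOX_LEN 64	/* mailbox size in bytes, per t4_hw.h */

	struct mbox_cmd {
		uint64_t cmd[MBOX_LEN / 8];
		uint64_t timestamp;
		uint32_t seqno;
		int16_t access;
		int16_t execute;
	};

	struct mbox_cmd_log {
		unsigned int size;
		unsigned int cursor;
		uint32_t seqno;
		/* 'size' struct mbox_cmd entries follow in the same allocation */
	};

	static struct mbox_cmd *entry(struct mbox_cmd_log *log, unsigned int idx)
	{
		/* log + 1 points just past the header, i.e. at entry 0 */
		return &((struct mbox_cmd *)(log + 1))[idx];
	}

	int main(void)
	{
		unsigned int n = 256;	/* T4_OS_LOG_MBOX_CMDS */
		struct mbox_cmd_log *log =
			calloc(1, sizeof(*log) + n * sizeof(struct mbox_cmd));

		log->size = n;
		/* record: write at cursor, then advance with wrap-around */
		struct mbox_cmd *e = entry(log, log->cursor++);
		if (log->cursor == log->size)
			log->cursor = 0;
		e->seqno = log->seqno++;
		free(log);
		return 0;
	}
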
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
index 052c660aca80..6ee2ed30626b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
@@ -253,7 +253,7 @@ void cxgb4_dcb_handle_fw_update(struct adapter *adap,
{
const union fw_port_dcb *fwdcb = &pcmd->u.dcb;
int port = FW_PORT_CMD_PORTID_G(be32_to_cpu(pcmd->op_to_portid));
- struct net_device *dev = adap->port[port];
+ struct net_device *dev = adap->port[adap->chan_map[port]];
struct port_info *pi = netdev_priv(dev);
struct port_dcb_info *dcb = &pi->dcb;
int dcb_type = pcmd->u.dcb.pgid.type;
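
The firmware addresses ports by TX channel number, which is no longer guaranteed to equal the host's port index, so FW_PORT_CMD replies are now translated through adap->chan_map[] before indexing adap->port[]. A toy illustration of the mapping (the values are hypothetical):

	/* Hypothetical 2-port adapter whose ports sit on channels 0 and 2: */
	static const u8 chan_map[4] = { 0, 0, 1, 0 };	/* channel -> port index */

	/* a firmware reply for "channel 2" resolves to host port 1 */
	struct net_device *dev = adap->port[chan_map[2]];
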
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 0bb41e9b9b1c..91fb50850fff 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -1152,6 +1152,104 @@ static const struct file_operations devlog_fops = {
.release = seq_release_private
};
+/* Show Firmware Mailbox Command/Reply Log
+ *
+ * Note that we don't do any locking when dumping the Firmware Mailbox Log so
+ * we may race with a concurrent log update and see partially written log
+ * entries. But it's probably Good Enough(tm).
+ * If we ever decide that we want to make sure that we're dumping a coherent
+ * log, we'd need to perform locking in the mailbox logging and in
+ * mboxlog_open() where we'd need to grab the entire mailbox log in one go
+ * like we do for the Firmware Device Log.
+ */
+static int mboxlog_show(struct seq_file *seq, void *v)
+{
+ struct adapter *adapter = seq->private;
+ struct mbox_cmd_log *log = adapter->mbox_log;
+ struct mbox_cmd *entry;
+ int entry_idx, i;
+
+ if (v == SEQ_START_TOKEN) {
+ seq_printf(seq,
+ "%10s %15s %5s %5s %s\n",
+ "Seq#", "Tstamp", "Atime", "Etime",
+ "Command/Reply");
+ return 0;
+ }
+
+ entry_idx = log->cursor + ((uintptr_t)v - 2);
+ if (entry_idx >= log->size)
+ entry_idx -= log->size;
+ entry = mbox_cmd_log_entry(log, entry_idx);
+
+ /* skip over unused entries */
+ if (entry->timestamp == 0)
+ return 0;
+
+ seq_printf(seq, "%10u %15llu %5d %5d",
+ entry->seqno, entry->timestamp,
+ entry->access, entry->execute);
+ for (i = 0; i < MBOX_LEN / 8; i++) {
+ u64 flit = entry->cmd[i];
+ u32 hi = (u32)(flit >> 32);
+ u32 lo = (u32)flit;
+
+ seq_printf(seq, " %08x %08x", hi, lo);
+ }
+ seq_puts(seq, "\n");
+ return 0;
+}
+
+static inline void *mboxlog_get_idx(struct seq_file *seq, loff_t pos)
+{
+ struct adapter *adapter = seq->private;
+ struct mbox_cmd_log *log = adapter->mbox_log;
+
+ return ((pos <= log->size) ? (void *)(uintptr_t)(pos + 1) : NULL);
+}
+
+static void *mboxlog_start(struct seq_file *seq, loff_t *pos)
+{
+ return *pos ? mboxlog_get_idx(seq, *pos) : SEQ_START_TOKEN;
+}
+
+static void *mboxlog_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ ++*pos;
+ return mboxlog_get_idx(seq, *pos);
+}
+
+static void mboxlog_stop(struct seq_file *seq, void *v)
+{
+}
+
+static const struct seq_operations mboxlog_seq_ops = {
+ .start = mboxlog_start,
+ .next = mboxlog_next,
+ .stop = mboxlog_stop,
+ .show = mboxlog_show
+};
+
+static int mboxlog_open(struct inode *inode, struct file *file)
+{
+ int res = seq_open(file, &mboxlog_seq_ops);
+
+ if (!res) {
+ struct seq_file *seq = file->private_data;
+
+ seq->private = inode->i_private;
+ }
+ return res;
+}
+
+static const struct file_operations mboxlog_fops = {
+ .owner = THIS_MODULE,
+ .open = mboxlog_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
static int mbox_show(struct seq_file *seq, void *v)
{
static const char * const owner[] = { "none", "FW", "driver",
@@ -1572,6 +1670,7 @@ static const struct file_operations flash_debugfs_fops = {
.owner = THIS_MODULE,
.open = mem_open,
.read = flash_read,
+ .llseek = default_llseek,
};
static inline void tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
@@ -3128,6 +3227,7 @@ int t4_setup_debugfs(struct adapter *adap)
{ "cim_qcfg", &cim_qcfg_fops, S_IRUSR, 0 },
{ "clk", &clk_debugfs_fops, S_IRUSR, 0 },
{ "devlog", &devlog_fops, S_IRUSR, 0 },
+ { "mboxlog", &mboxlog_fops, S_IRUSR, 0 },
{ "mbox0", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 0 },
{ "mbox1", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 1 },
{ "mbox2", &mbox_debugfs_fops, S_IRUSR | S_IWUSR, 2 },
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 7a0b92b2f73c..02f80febeb91 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -480,178 +480,293 @@ static int identify_port(struct net_device *dev,
return t4_identify_port(adap, adap->pf, netdev2pinfo(dev)->viid, val);
}
-static unsigned int from_fw_linkcaps(enum fw_port_type type, unsigned int caps)
+/**
+ * from_fw_port_mod_type - translate Firmware Port/Module type to Ethtool
+ * @port_type: Firmware Port Type
+ * @mod_type: Firmware Module Type
+ *
+ * Translate Firmware Port/Module type to Ethtool Port Type.
+ */
+static int from_fw_port_mod_type(enum fw_port_type port_type,
+ enum fw_port_module_type mod_type)
{
- unsigned int v = 0;
-
- if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
- type == FW_PORT_TYPE_BT_XAUI) {
- v |= SUPPORTED_TP;
- if (caps & FW_PORT_CAP_SPEED_100M)
- v |= SUPPORTED_100baseT_Full;
- if (caps & FW_PORT_CAP_SPEED_1G)
- v |= SUPPORTED_1000baseT_Full;
- if (caps & FW_PORT_CAP_SPEED_10G)
- v |= SUPPORTED_10000baseT_Full;
- } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
- v |= SUPPORTED_Backplane;
- if (caps & FW_PORT_CAP_SPEED_1G)
- v |= SUPPORTED_1000baseKX_Full;
- if (caps & FW_PORT_CAP_SPEED_10G)
- v |= SUPPORTED_10000baseKX4_Full;
- } else if (type == FW_PORT_TYPE_KR) {
- v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
- } else if (type == FW_PORT_TYPE_BP_AP) {
- v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
- SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
- } else if (type == FW_PORT_TYPE_BP4_AP) {
- v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
- SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
- SUPPORTED_10000baseKX4_Full;
- } else if (type == FW_PORT_TYPE_FIBER_XFI ||
- type == FW_PORT_TYPE_FIBER_XAUI ||
- type == FW_PORT_TYPE_SFP ||
- type == FW_PORT_TYPE_QSFP_10G ||
- type == FW_PORT_TYPE_QSA) {
- v |= SUPPORTED_FIBRE;
- if (caps & FW_PORT_CAP_SPEED_1G)
- v |= SUPPORTED_1000baseT_Full;
- if (caps & FW_PORT_CAP_SPEED_10G)
- v |= SUPPORTED_10000baseT_Full;
- } else if (type == FW_PORT_TYPE_BP40_BA ||
- type == FW_PORT_TYPE_QSFP) {
- v |= SUPPORTED_40000baseSR4_Full;
- v |= SUPPORTED_FIBRE;
+ if (port_type == FW_PORT_TYPE_BT_SGMII ||
+ port_type == FW_PORT_TYPE_BT_XFI ||
+ port_type == FW_PORT_TYPE_BT_XAUI) {
+ return PORT_TP;
+ } else if (port_type == FW_PORT_TYPE_FIBER_XFI ||
+ port_type == FW_PORT_TYPE_FIBER_XAUI) {
+ return PORT_FIBRE;
+ } else if (port_type == FW_PORT_TYPE_SFP ||
+ port_type == FW_PORT_TYPE_QSFP_10G ||
+ port_type == FW_PORT_TYPE_QSA ||
+ port_type == FW_PORT_TYPE_QSFP) {
+ if (mod_type == FW_PORT_MOD_TYPE_LR ||
+ mod_type == FW_PORT_MOD_TYPE_SR ||
+ mod_type == FW_PORT_MOD_TYPE_ER ||
+ mod_type == FW_PORT_MOD_TYPE_LRM)
+ return PORT_FIBRE;
+ else if (mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
+ mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
+ return PORT_DA;
+ else
+ return PORT_OTHER;
}
- if (caps & FW_PORT_CAP_ANEG)
- v |= SUPPORTED_Autoneg;
- return v;
+ return PORT_OTHER;
}
-static unsigned int to_fw_linkcaps(unsigned int caps)
+/**
+ * speed_to_fw_caps - translate Port Speed to Firmware Port Capabilities
+ * @speed: speed in Kb/s
+ *
+ * Translates a specific Port Speed into a Firmware Port Capabilities
+ * value.
+ */
+static unsigned int speed_to_fw_caps(int speed)
{
- unsigned int v = 0;
-
- if (caps & ADVERTISED_100baseT_Full)
- v |= FW_PORT_CAP_SPEED_100M;
- if (caps & ADVERTISED_1000baseT_Full)
- v |= FW_PORT_CAP_SPEED_1G;
- if (caps & ADVERTISED_10000baseT_Full)
- v |= FW_PORT_CAP_SPEED_10G;
- if (caps & ADVERTISED_40000baseSR4_Full)
- v |= FW_PORT_CAP_SPEED_40G;
- return v;
+ if (speed == 100)
+ return FW_PORT_CAP_SPEED_100M;
+ if (speed == 1000)
+ return FW_PORT_CAP_SPEED_1G;
+ if (speed == 10000)
+ return FW_PORT_CAP_SPEED_10G;
+ if (speed == 25000)
+ return FW_PORT_CAP_SPEED_25G;
+ if (speed == 40000)
+ return FW_PORT_CAP_SPEED_40G;
+ if (speed == 100000)
+ return FW_PORT_CAP_SPEED_100G;
+ return 0;
}
-static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+/**
+ * fw_caps_to_lmm - translate Firmware to ethtool Link Mode Mask
+ * @port_type: Firmware Port Type
+ * @fw_caps: Firmware Port Capabilities
+ * @link_mode_mask: ethtool Link Mode Mask
+ *
+ * Translate a Firmware Port Capabilities specification to an ethtool
+ * Link Mode Mask.
+ */
+static void fw_caps_to_lmm(enum fw_port_type port_type,
+ unsigned int fw_caps,
+ unsigned long *link_mode_mask)
{
- const struct port_info *p = netdev_priv(dev);
-
- if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
- p->port_type == FW_PORT_TYPE_BT_XFI ||
- p->port_type == FW_PORT_TYPE_BT_XAUI) {
- cmd->port = PORT_TP;
- } else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
- p->port_type == FW_PORT_TYPE_FIBER_XAUI) {
- cmd->port = PORT_FIBRE;
- } else if (p->port_type == FW_PORT_TYPE_SFP ||
- p->port_type == FW_PORT_TYPE_QSFP_10G ||
- p->port_type == FW_PORT_TYPE_QSA ||
- p->port_type == FW_PORT_TYPE_QSFP) {
- if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
- p->mod_type == FW_PORT_MOD_TYPE_SR ||
- p->mod_type == FW_PORT_MOD_TYPE_ER ||
- p->mod_type == FW_PORT_MOD_TYPE_LRM)
- cmd->port = PORT_FIBRE;
- else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
- p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
- cmd->port = PORT_DA;
- else
- cmd->port = PORT_OTHER;
+ #define SET_LMM(__lmm_name) __set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name \
+ ## _BIT, link_mode_mask)
+
+ #define FW_CAPS_TO_LMM(__fw_name, __lmm_name) \
+ do { \
+ if (fw_caps & FW_PORT_CAP_ ## __fw_name) \
+ SET_LMM(__lmm_name); \
+ } while (0)
+
+ switch (port_type) {
+ case FW_PORT_TYPE_BT_SGMII:
+ case FW_PORT_TYPE_BT_XFI:
+ case FW_PORT_TYPE_BT_XAUI:
+ SET_LMM(TP);
+ FW_CAPS_TO_LMM(SPEED_100M, 100baseT_Full);
+ FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
+ FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
+ break;
+
+ case FW_PORT_TYPE_KX4:
+ case FW_PORT_TYPE_KX:
+ SET_LMM(Backplane);
+ FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
+ FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full);
+ break;
+
+ case FW_PORT_TYPE_KR:
+ SET_LMM(Backplane);
+ SET_LMM(10000baseKR_Full);
+ break;
+
+ case FW_PORT_TYPE_BP_AP:
+ SET_LMM(Backplane);
+ SET_LMM(10000baseR_FEC);
+ SET_LMM(10000baseKR_Full);
+ SET_LMM(1000baseKX_Full);
+ break;
+
+ case FW_PORT_TYPE_BP4_AP:
+ SET_LMM(Backplane);
+ SET_LMM(10000baseR_FEC);
+ SET_LMM(10000baseKR_Full);
+ SET_LMM(1000baseKX_Full);
+ SET_LMM(10000baseKX4_Full);
+ break;
+
+ case FW_PORT_TYPE_FIBER_XFI:
+ case FW_PORT_TYPE_FIBER_XAUI:
+ case FW_PORT_TYPE_SFP:
+ case FW_PORT_TYPE_QSFP_10G:
+ case FW_PORT_TYPE_QSA:
+ SET_LMM(FIBRE);
+ FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
+ FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
+ break;
+
+ case FW_PORT_TYPE_BP40_BA:
+ case FW_PORT_TYPE_QSFP:
+ SET_LMM(FIBRE);
+ SET_LMM(40000baseSR4_Full);
+ break;
+
+ case FW_PORT_TYPE_CR_QSFP:
+ case FW_PORT_TYPE_SFP28:
+ SET_LMM(FIBRE);
+ SET_LMM(25000baseCR_Full);
+ break;
+
+ case FW_PORT_TYPE_KR4_100G:
+ case FW_PORT_TYPE_CR4_QSFP:
+ SET_LMM(FIBRE);
+ SET_LMM(100000baseCR4_Full);
+ break;
+
+ default:
+ break;
+ }
+
+ FW_CAPS_TO_LMM(ANEG, Autoneg);
+ FW_CAPS_TO_LMM(802_3_PAUSE, Pause);
+ FW_CAPS_TO_LMM(802_3_ASM_DIR, Asym_Pause);
+
+ #undef FW_CAPS_TO_LMM
+ #undef SET_LMM
+}
+
+/**
+ * lmm_to_fw_caps - translate ethtool Link Mode Mask to Firmware
+ * capabilities
+ *
+ * @link_mode_mask: ethtool Link Mode Mask
+ *
+ * Translate ethtool Link Mode Mask into a Firmware Port capabilities
+ * value.
+ */
+static unsigned int lmm_to_fw_caps(const unsigned long *link_mode_mask)
+{
+ unsigned int fw_caps = 0;
+
+ #define LMM_TO_FW_CAPS(__lmm_name, __fw_name) \
+ do { \
+ if (test_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \
+ link_mode_mask)) \
+ fw_caps |= FW_PORT_CAP_ ## __fw_name; \
+ } while (0)
+
+ LMM_TO_FW_CAPS(100baseT_Full, SPEED_100M);
+ LMM_TO_FW_CAPS(1000baseT_Full, SPEED_1G);
+ LMM_TO_FW_CAPS(10000baseT_Full, SPEED_10G);
+ LMM_TO_FW_CAPS(40000baseSR4_Full, SPEED_40G);
+ LMM_TO_FW_CAPS(25000baseCR_Full, SPEED_25G);
+ LMM_TO_FW_CAPS(100000baseCR4_Full, SPEED_100G);
+
+ #undef LMM_TO_FW_CAPS
+
+ return fw_caps;
+}
+
+static int get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *link_ksettings)
+{
+ const struct port_info *pi = netdev_priv(dev);
+ struct ethtool_link_settings *base = &link_ksettings->base;
+
+ ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
+ ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
+ ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising);
+
+ base->port = from_fw_port_mod_type(pi->port_type, pi->mod_type);
+
+ if (pi->mdio_addr >= 0) {
+ base->phy_address = pi->mdio_addr;
+ base->mdio_support = (pi->port_type == FW_PORT_TYPE_BT_SGMII
+ ? ETH_MDIO_SUPPORTS_C22
+ : ETH_MDIO_SUPPORTS_C45);
} else {
- cmd->port = PORT_OTHER;
+ base->phy_address = 255;
+ base->mdio_support = 0;
}
- if (p->mdio_addr >= 0) {
- cmd->phy_address = p->mdio_addr;
- cmd->transceiver = XCVR_EXTERNAL;
- cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
- MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
+ fw_caps_to_lmm(pi->port_type, pi->link_cfg.supported,
+ link_ksettings->link_modes.supported);
+ fw_caps_to_lmm(pi->port_type, pi->link_cfg.advertising,
+ link_ksettings->link_modes.advertising);
+ fw_caps_to_lmm(pi->port_type, pi->link_cfg.lp_advertising,
+ link_ksettings->link_modes.lp_advertising);
+
+ if (netif_carrier_ok(dev)) {
+ base->speed = pi->link_cfg.speed;
+ base->duplex = DUPLEX_FULL;
} else {
- cmd->phy_address = 0; /* not really, but no better option */
- cmd->transceiver = XCVR_INTERNAL;
- cmd->mdio_support = 0;
+ base->speed = SPEED_UNKNOWN;
+ base->duplex = DUPLEX_UNKNOWN;
}
- cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
- cmd->advertising = from_fw_linkcaps(p->port_type,
- p->link_cfg.advertising);
- ethtool_cmd_speed_set(cmd,
- netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
- cmd->duplex = DUPLEX_FULL;
- cmd->autoneg = p->link_cfg.autoneg;
- cmd->maxtxpkt = 0;
- cmd->maxrxpkt = 0;
- return 0;
-}
+ base->autoneg = pi->link_cfg.autoneg;
+ if (pi->link_cfg.supported & FW_PORT_CAP_ANEG)
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ supported, Autoneg);
+ if (pi->link_cfg.autoneg)
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ advertising, Autoneg);
-static unsigned int speed_to_caps(int speed)
-{
- if (speed == 100)
- return FW_PORT_CAP_SPEED_100M;
- if (speed == 1000)
- return FW_PORT_CAP_SPEED_1G;
- if (speed == 10000)
- return FW_PORT_CAP_SPEED_10G;
- if (speed == 40000)
- return FW_PORT_CAP_SPEED_40G;
return 0;
}
-static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings
+ *link_ksettings)
{
- unsigned int cap;
- struct port_info *p = netdev_priv(dev);
- struct link_config *lc = &p->link_cfg;
- u32 speed = ethtool_cmd_speed(cmd);
+ struct port_info *pi = netdev_priv(dev);
+ struct link_config *lc = &pi->link_cfg;
+ const struct ethtool_link_settings *base = &link_ksettings->base;
struct link_config old_lc;
- int ret;
+ unsigned int fw_caps;
+ int ret = 0;
- if (cmd->duplex != DUPLEX_FULL) /* only full-duplex supported */
+ /* only full-duplex supported */
+ if (base->duplex != DUPLEX_FULL)
return -EINVAL;
if (!(lc->supported & FW_PORT_CAP_ANEG)) {
/* PHY offers a single speed. See if that's what's
* being requested.
*/
- if (cmd->autoneg == AUTONEG_DISABLE &&
- (lc->supported & speed_to_caps(speed)))
+ if (base->autoneg == AUTONEG_DISABLE &&
+ (lc->supported & speed_to_fw_caps(base->speed)))
return 0;
return -EINVAL;
}
old_lc = *lc;
- if (cmd->autoneg == AUTONEG_DISABLE) {
- cap = speed_to_caps(speed);
+ if (base->autoneg == AUTONEG_DISABLE) {
+ fw_caps = speed_to_fw_caps(base->speed);
- if (!(lc->supported & cap))
+ if (!(lc->supported & fw_caps))
return -EINVAL;
- lc->requested_speed = cap;
+ lc->requested_speed = fw_caps;
lc->advertising = 0;
} else {
- cap = to_fw_linkcaps(cmd->advertising);
- if (!(lc->supported & cap))
+ fw_caps =
+ lmm_to_fw_caps(link_ksettings->link_modes.advertising);
+
+ if (!(lc->supported & fw_caps))
return -EINVAL;
lc->requested_speed = 0;
- lc->advertising = cap | FW_PORT_CAP_ANEG;
+ lc->advertising = fw_caps | FW_PORT_CAP_ANEG;
}
- lc->autoneg = cmd->autoneg;
+ lc->autoneg = base->autoneg;
/* If the firmware rejects the Link Configuration request, back out
* the changes and report the error.
*/
- ret = t4_link_l1cfg(p->adapter, p->adapter->mbox, p->tx_chan, lc);
+ ret = t4_link_l1cfg(pi->adapter, pi->adapter->mbox, pi->tx_chan, lc);
if (ret)
*lc = old_lc;
@@ -1093,8 +1208,8 @@ static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
}
static const struct ethtool_ops cxgb_ethtool_ops = {
- .get_settings = get_settings,
- .set_settings = set_settings,
+ .get_link_ksettings = get_link_ksettings,
+ .set_link_ksettings = set_link_ksettings,
.get_drvinfo = get_drvinfo,
.get_msglevel = get_msglevel,
.set_msglevel = set_msglevel,
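
For reference, the SET_LMM()/FW_CAPS_TO_LMM() helpers above are thin wrappers over the kernel's link-mode bitmap; FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full), for instance, expands to roughly:

	do {
		if (fw_caps & FW_PORT_CAP_SPEED_10G)
			__set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
				  link_mode_mask);
	} while (0);
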
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index d1e3f0997d6b..3ceafb55d6da 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -64,6 +64,7 @@
#include <net/bonding.h>
#include <net/addrconf.h>
#include <asm/uaccess.h>
+#include <linux/crash_dump.h>
#include "cxgb4.h"
#include "t4_regs.h"
@@ -168,7 +169,8 @@ MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter,"
static int dflt_msg_enable = DFLT_MSG_ENABLE;
module_param(dflt_msg_enable, int, 0644);
-MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
+MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap, "
+ "deprecated parameter");
/*
* The driver uses the best interrupt scheme available on a platform in the
@@ -205,7 +207,7 @@ static int rx_dma_offset = 2;
static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];
module_param_array(num_vf, uint, NULL, 0644);
-MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
+MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3, deprecated parameter - please use the pci sysfs interface.");
#endif
/* TX Queue select used to determine what algorithm to use for selecting TX
@@ -303,6 +305,22 @@ static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
}
#endif /* CONFIG_CHELSIO_T4_DCB */
+int cxgb4_dcb_enabled(const struct net_device *dev)
+{
+#ifdef CONFIG_CHELSIO_T4_DCB
+ struct port_info *pi = netdev_priv(dev);
+
+ if (!pi->dcb.enabled)
+ return 0;
+
+ return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
+ (pi->dcb.state == CXGB4_DCB_STATE_HOST));
+#else
+ return 0;
+#endif
+}
+EXPORT_SYMBOL(cxgb4_dcb_enabled);
+
void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
struct net_device *dev = adapter->port[port_id];
@@ -313,8 +331,10 @@ void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
netif_carrier_on(dev);
else {
#ifdef CONFIG_CHELSIO_T4_DCB
- cxgb4_dcb_state_init(dev);
- dcb_tx_queue_prio_enable(dev, false);
+ if (cxgb4_dcb_enabled(dev)) {
+ cxgb4_dcb_state_init(dev);
+ dcb_tx_queue_prio_enable(dev, false);
+ }
#endif /* CONFIG_CHELSIO_T4_DCB */
netif_carrier_off(dev);
}
@@ -336,6 +356,17 @@ void t4_os_portmod_changed(const struct adapter *adap, int port_id)
netdev_info(dev, "port module unplugged\n");
else if (pi->mod_type < ARRAY_SIZE(mod_str))
netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
+ else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
+ netdev_info(dev, "%s: unsupported port module inserted\n",
+ dev->name);
+ else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
+ netdev_info(dev, "%s: unknown port module inserted\n",
+ dev->name);
+ else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
+ netdev_info(dev, "%s: transceiver module error\n", dev->name);
+ else
+ netdev_info(dev, "%s: unknown module type %d inserted\n",
+ dev->name, pi->mod_type);
}
int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
@@ -430,11 +461,8 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
- if (!(dev->flags & IFF_PROMISC)) {
- __dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
- if (!(dev->flags & IFF_ALLMULTI))
- __dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
- }
+ __dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
+ __dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
return t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu,
(dev->flags & IFF_PROMISC) ? 1 : 0,
@@ -482,28 +510,12 @@ static int link_start(struct net_device *dev)
return ret;
}
-int cxgb4_dcb_enabled(const struct net_device *dev)
-{
-#ifdef CONFIG_CHELSIO_T4_DCB
- struct port_info *pi = netdev_priv(dev);
-
- if (!pi->dcb.enabled)
- return 0;
-
- return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
- (pi->dcb.state == CXGB4_DCB_STATE_HOST));
-#else
- return 0;
-#endif
-}
-EXPORT_SYMBOL(cxgb4_dcb_enabled);
-
#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
- struct net_device *dev = adap->port[port];
+ struct net_device *dev = adap->port[adap->chan_map[port]];
int old_dcb_enabled = cxgb4_dcb_enabled(dev);
int new_dcb_enabled;
@@ -633,7 +645,8 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
action == FW_PORT_ACTION_GET_PORT_INFO) {
int port = FW_PORT_CMD_PORTID_G(
be32_to_cpu(pcmd->op_to_portid));
- struct net_device *dev = q->adap->port[port];
+ struct net_device *dev =
+ q->adap->port[q->adap->chan_map[port]];
int state_input = ((pcmd->u.info.dcbxdis_pkd &
FW_PORT_CMD_DCBXDIS_F)
? CXGB4_DCB_INPUT_FW_DISABLED
@@ -3720,7 +3733,8 @@ static int adap_init0(struct adapter *adap)
return ret;
/* Contact FW, advertising Master capability */
- ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
+ ret = t4_fw_hello(adap, adap->mbox, adap->mbox,
+ is_kdump_kernel() ? MASTER_MUST : MASTER_MAY, &state);
if (ret < 0) {
dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
ret);
@@ -3737,7 +3751,10 @@ static int adap_init0(struct adapter *adap)
* is excessively mismatched relative to the driver.)
*/
t4_get_fw_version(adap, &adap->params.fw_vers);
+ t4_get_bs_version(adap, &adap->params.bs_vers);
t4_get_tp_version(adap, &adap->params.tp_vers);
+ t4_get_exprom_version(adap, &adap->params.er_vers);
+
ret = t4_check_fw_version(adap);
/* If firmware is too old (not supported by driver) force an update. */
if (ret)
@@ -4288,10 +4305,17 @@ static const struct pci_error_handlers cxgb4_eeh = {
.resume = eeh_resume,
};
+/* Return true if the Link Configuration supports "High Speeds" (those greater
+ * than 1Gb/s).
+ */
static inline bool is_x_10g_port(const struct link_config *lc)
{
- return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
- (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
+ unsigned int speeds, high_speeds;
+
+ speeds = FW_PORT_CAP_SPEED_V(FW_PORT_CAP_SPEED_G(lc->supported));
+ high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G);
+
+ return high_speeds != 0;
}
static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
@@ -4318,6 +4342,11 @@ static void cfg_queues(struct adapter *adap)
#endif
int ciq_size;
+ /* Reduce memory usage in kdump environments by disabling all offload. */
+ if (is_kdump_kernel())
+ adap->params.offload = 0;
+
for_each_port(adap, i)
n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
#ifdef CONFIG_CHELSIO_T4_DCB
@@ -4651,6 +4680,68 @@ static void cxgb4_check_pcie_caps(struct adapter *adap)
"suggested for optimal performance.\n");
}
+/* Dump basic information about the adapter */
+static void print_adapter_info(struct adapter *adapter)
+{
+ /* Device information */
+ dev_info(adapter->pdev_dev, "Chelsio %s rev %d\n",
+ adapter->params.vpd.id,
+ CHELSIO_CHIP_RELEASE(adapter->params.chip));
+ dev_info(adapter->pdev_dev, "S/N: %s, P/N: %s\n",
+ adapter->params.vpd.sn, adapter->params.vpd.pn);
+
+ /* Firmware Version */
+ if (!adapter->params.fw_vers)
+ dev_warn(adapter->pdev_dev, "No firmware loaded\n");
+ else
+ dev_info(adapter->pdev_dev, "Firmware version: %u.%u.%u.%u\n",
+ FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
+ FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
+ FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
+ FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers));
+
+ /* Bootstrap Firmware Version. (Some adapters don't have Bootstrap
+ * Firmware, so dev_info() is more appropriate here.)
+ */
+ if (!adapter->params.bs_vers)
+ dev_info(adapter->pdev_dev, "No bootstrap loaded\n");
+ else
+ dev_info(adapter->pdev_dev, "Bootstrap version: %u.%u.%u.%u\n",
+ FW_HDR_FW_VER_MAJOR_G(adapter->params.bs_vers),
+ FW_HDR_FW_VER_MINOR_G(adapter->params.bs_vers),
+ FW_HDR_FW_VER_MICRO_G(adapter->params.bs_vers),
+ FW_HDR_FW_VER_BUILD_G(adapter->params.bs_vers));
+
+ /* TP Microcode Version */
+ if (!adapter->params.tp_vers)
+ dev_warn(adapter->pdev_dev, "No TP Microcode loaded\n");
+ else
+ dev_info(adapter->pdev_dev,
+ "TP Microcode version: %u.%u.%u.%u\n",
+ FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
+ FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
+ FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
+ FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
+
+ /* Expansion ROM version */
+ if (!adapter->params.er_vers)
+ dev_info(adapter->pdev_dev, "No Expansion ROM loaded\n");
+ else
+ dev_info(adapter->pdev_dev,
+ "Expansion ROM version: %u.%u.%u.%u\n",
+ FW_HDR_FW_VER_MAJOR_G(adapter->params.er_vers),
+ FW_HDR_FW_VER_MINOR_G(adapter->params.er_vers),
+ FW_HDR_FW_VER_MICRO_G(adapter->params.er_vers),
+ FW_HDR_FW_VER_BUILD_G(adapter->params.er_vers));
+
+ /* Software/Hardware configuration */
+ dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n",
+ is_offload(adapter) ? "R" : "",
+ ((adapter->flags & USING_MSIX) ? "MSI-X" :
+ (adapter->flags & USING_MSI) ? "MSI" : ""),
+ is_offload(adapter) ? "Offload" : "non-Offload");
+}
+
static void print_port_info(const struct net_device *dev)
{
char buf[80];
@@ -4672,20 +4763,18 @@ static void print_port_info(const struct net_device *dev)
bufp += sprintf(bufp, "1000/");
if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
bufp += sprintf(bufp, "10G/");
+ if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_25G)
+ bufp += sprintf(bufp, "25G/");
if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
bufp += sprintf(bufp, "40G/");
+ if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100G)
+ bufp += sprintf(bufp, "100G/");
if (bufp != buf)
--bufp;
sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
- netdev_info(dev, "Chelsio %s rev %d %s %sNIC %s\n",
- adap->params.vpd.id,
- CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
- is_offload(adap) ? "R" : "",
- (adap->flags & USING_MSIX) ? " MSI-X" :
- (adap->flags & USING_MSI) ? " MSI" : "");
- netdev_info(dev, "S/N: %s, P/N: %s\n",
- adap->params.vpd.sn, adap->params.vpd.pn);
+ netdev_info(dev, "%s: Chelsio %s (%s) %s\n",
+ dev->name, adap->params.vpd.id, adap->name, buf);
}
static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
@@ -4755,6 +4844,60 @@ static int get_chip_type(struct pci_dev *pdev, u32 pl_rev)
return -EINVAL;
}
+#ifdef CONFIG_PCI_IOV
+static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ int err = 0;
+ int current_vfs = pci_num_vf(pdev);
+ u32 pcie_fw;
+ void __iomem *regs;
+
+ regs = pci_ioremap_bar(pdev, 0);
+ if (!regs) {
+ dev_err(&pdev->dev, "cannot map device registers\n");
+ return -ENOMEM;
+ }
+
+ pcie_fw = readl(regs + PCIE_FW_A);
+ iounmap(regs);
+ /* Check if cxgb4 is the MASTER and fw is initialized */
+ if (!(pcie_fw & PCIE_FW_INIT_F) ||
+ !(pcie_fw & PCIE_FW_MASTER_VLD_F) ||
+ PCIE_FW_MASTER_G(pcie_fw) != 4) {
+ dev_warn(&pdev->dev,
+ "cxgb4 driver needs to be MASTER to support SRIOV\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* If any of the VFs is already assigned to a guest OS, the
+ * SR-IOV configuration cannot be modified.
+ */
+ if (current_vfs && pci_vfs_assigned(pdev)) {
+ dev_err(&pdev->dev,
+ "Cannot modify SR-IOV while VFs are assigned\n");
+ num_vfs = current_vfs;
+ return num_vfs;
+ }
+
+ /* Disable SR-IOV when zero is passed. SR-IOV must be disabled
+ * before being reconfigured, otherwise the PCI core warns:
+ * " 'n' VFs already enabled. Disable before enabling 'm' VFs."
+ */
+ if (!num_vfs) {
+ pci_disable_sriov(pdev);
+ return num_vfs;
+ }
+
+ if (num_vfs != current_vfs) {
+ err = pci_enable_sriov(pdev, num_vfs);
+ if (err)
+ return err;
+ }
+ return num_vfs;
+}
+#endif
+
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int func, i, err, s_qpp, qpp, num_seg;
@@ -4837,12 +4980,23 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_free_adapter;
}
+ adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
+ (sizeof(struct mbox_cmd) *
+ T4_OS_LOG_MBOX_CMDS),
+ GFP_KERNEL);
+ if (!adapter->mbox_log) {
+ err = -ENOMEM;
+ goto out_free_adapter;
+ }
+ adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;
+
/* PCI device has been enabled */
adapter->flags |= DEV_ENABLED;
adapter->regs = regs;
adapter->pdev = pdev;
adapter->pdev_dev = &pdev->dev;
+ adapter->name = pci_name(pdev);
adapter->mbox = func;
adapter->pf = func;
adapter->msg_enable = dflt_msg_enable;
@@ -5073,13 +5227,20 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (is_offload(adapter))
attach_ulds(adapter);
+ print_adapter_info(adapter);
+
sriov:
#ifdef CONFIG_PCI_IOV
- if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
+ if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0) {
+ dev_warn(&pdev->dev,
+ "Enabling SR-IOV VFs using the num_vf module "
+ "parameter is deprecated - please use the pci sysfs "
+ "interface instead.\n");
if (pci_enable_sriov(pdev, num_vf[func]) == 0)
dev_info(&pdev->dev,
"instantiated %u virtual functions\n",
num_vf[func]);
+ }
#endif
return 0;
@@ -5092,6 +5253,7 @@ sriov:
if (adapter->workq)
destroy_workqueue(adapter->workq);
+ kfree(adapter->mbox_log);
kfree(adapter);
out_unmap_bar0:
iounmap(regs);
@@ -5158,6 +5320,7 @@ static void remove_one(struct pci_dev *pdev)
adapter->flags &= ~DEV_ENABLED;
}
pci_release_regions(pdev);
+ kfree(adapter->mbox_log);
synchronize_rcu();
kfree(adapter);
} else
@@ -5170,6 +5333,9 @@ static struct pci_driver cxgb4_driver = {
.probe = init_one,
.remove = remove_one,
.shutdown = remove_one,
+#ifdef CONFIG_PCI_IOV
+ .sriov_configure = cxgb4_iov_configure,
+#endif
.err_handler = &cxgb4_eeh,
};
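
With .sriov_configure wired up, VFs are managed through the standard PCI sysfs flow rather than the deprecated num_vf parameter: writing N to /sys/bus/pci/devices/<BDF>/sriov_numvfs invokes the callback, which returns the VF count now in effect or a negative errno (writing 0 disables SR-IOV). A minimal sketch of that contract (hypothetical driver, error handling elided):

	static int example_sriov_configure(struct pci_dev *pdev, int num_vfs)
	{
		if (num_vfs == 0) {
			pci_disable_sriov(pdev);	/* disabling is always legal */
			return 0;
		}
		/* pci_enable_sriov() returns 0 on success or -errno */
		return pci_enable_sriov(pdev, num_vfs) ?: num_vfs;
	}
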
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 6278e5a74b74..ad3552df0545 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1192,7 +1192,7 @@ out_free: dev_kfree_skb_any(skb);
/* Discard the packet if the length is greater than mtu */
max_pkt_len = ETH_HLEN + dev->mtu;
- if (skb_vlan_tag_present(skb))
+ if (skb_vlan_tagged(skb))
max_pkt_len += VLAN_HLEN;
if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
goto out_free;
@@ -3006,7 +3006,9 @@ void t4_free_sge_resources(struct adapter *adap)
if (etq->q.desc) {
t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
etq->q.cntxt_id);
+ __netif_tx_lock_bh(etq->txq);
free_tx_desc(adap, &etq->q, etq->q.in_use, true);
+ __netif_tx_unlock_bh(etq->txq);
kfree(etq->q.sdesc);
free_txq(adap, &etq->q);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 71586a3e0f61..660204bff726 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -224,18 +224,34 @@ static void fw_asrt(struct adapter *adap, u32 mbox_addr)
be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
}
-static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
+/**
+ * t4_record_mbox - record a Firmware Mailbox Command/Reply in the log
+ * @adapter: the adapter
+ * @cmd: the Firmware Mailbox Command or Reply
+ * @size: command length in bytes
+ * @access: the time (ms) needed to access the Firmware Mailbox
+ * @execute: the time (ms) the command spent being executed
+ */
+static void t4_record_mbox(struct adapter *adapter,
+ const __be64 *cmd, unsigned int size,
+ int access, int execute)
{
- dev_err(adap->pdev_dev,
- "mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
- (unsigned long long)t4_read_reg64(adap, data_reg),
- (unsigned long long)t4_read_reg64(adap, data_reg + 8),
- (unsigned long long)t4_read_reg64(adap, data_reg + 16),
- (unsigned long long)t4_read_reg64(adap, data_reg + 24),
- (unsigned long long)t4_read_reg64(adap, data_reg + 32),
- (unsigned long long)t4_read_reg64(adap, data_reg + 40),
- (unsigned long long)t4_read_reg64(adap, data_reg + 48),
- (unsigned long long)t4_read_reg64(adap, data_reg + 56));
+ struct mbox_cmd_log *log = adapter->mbox_log;
+ struct mbox_cmd *entry;
+ int i;
+
+ entry = mbox_cmd_log_entry(log, log->cursor++);
+ if (log->cursor == log->size)
+ log->cursor = 0;
+
+ for (i = 0; i < size / 8; i++)
+ entry->cmd[i] = be64_to_cpu(cmd[i]);
+ while (i < MBOX_LEN / 8)
+ entry->cmd[i++] = 0;
+ entry->timestamp = jiffies;
+ entry->seqno = log->seqno++;
+ entry->access = access;
+ entry->execute = execute;
}
/**
@@ -268,12 +284,16 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
1, 1, 3, 5, 10, 10, 20, 50, 100, 200
};
+ u16 access = 0;
+ u16 execute = 0;
u32 v;
u64 res;
- int i, ms, delay_idx;
+ int i, ms, delay_idx, ret;
const __be64 *p = cmd;
u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL_A);
+ __be64 cmd_rpl[MBOX_LEN / 8];
+ u32 pcie_fw;
if ((size & 15) || size > MBOX_LEN)
return -EINVAL;
@@ -285,13 +305,24 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
if (adap->pdev->error_state != pci_channel_io_normal)
return -EIO;
+ /* If we have a negative timeout, that implies that we can't sleep. */
+ if (timeout < 0) {
+ sleep_ok = false;
+ timeout = -timeout;
+ }
+
v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
- if (v != MBOX_OWNER_DRV)
- return v ? -EBUSY : -ETIMEDOUT;
+ if (v != MBOX_OWNER_DRV) {
+ ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
+ t4_record_mbox(adap, cmd, MBOX_LEN, access, ret);
+ return ret;
+ }
+ /* Copy in the new mailbox command and send it on its way ... */
+ t4_record_mbox(adap, cmd, MBOX_LEN, access, 0);
for (i = 0; i < size; i += 8)
t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
@@ -301,7 +332,10 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
delay_idx = 0;
ms = delay[0];
- for (i = 0; i < timeout; i += ms) {
+ for (i = 0;
+ !((pcie_fw = t4_read_reg(adap, PCIE_FW_A)) & PCIE_FW_ERR_F) &&
+ i < timeout;
+ i += ms) {
if (sleep_ok) {
ms = delay[delay_idx]; /* last element may repeat */
if (delay_idx < ARRAY_SIZE(delay) - 1)
@@ -317,26 +351,31 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
continue;
}
- res = t4_read_reg64(adap, data_reg);
+ get_mbox_rpl(adap, cmd_rpl, MBOX_LEN / 8, data_reg);
+ res = be64_to_cpu(cmd_rpl[0]);
+
if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) {
fw_asrt(adap, data_reg);
res = FW_CMD_RETVAL_V(EIO);
} else if (rpl) {
- get_mbox_rpl(adap, rpl, size / 8, data_reg);
+ memcpy(rpl, cmd_rpl, size);
}
- if (FW_CMD_RETVAL_G((int)res))
- dump_mbox(adap, mbox, data_reg);
t4_write_reg(adap, ctl_reg, 0);
+
+ execute = i + ms;
+ t4_record_mbox(adap, cmd_rpl,
+ MBOX_LEN, access, execute);
return -FW_CMD_RETVAL_G((int)res);
}
}
- dump_mbox(adap, mbox, data_reg);
+ ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -ETIMEDOUT;
+ t4_record_mbox(adap, cmd, MBOX_LEN, access, ret);
dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
*(const u8 *)cmd, mbox);
t4_report_fw_error(adap);
- return -ETIMEDOUT;
+ return ret;
}
int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
@@ -2937,6 +2976,20 @@ int t4_get_fw_version(struct adapter *adapter, u32 *vers)
}
/**
+ * t4_get_bs_version - read the firmware bootstrap version
+ * @adapter: the adapter
+ * @vers: where to place the version
+ *
+ * Reads the FW Bootstrap version from flash.
+ */
+int t4_get_bs_version(struct adapter *adapter, u32 *vers)
+{
+ return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
+ offsetof(struct fw_hdr, fw_ver), 1,
+ vers, 0);
+}
+
+/**
* t4_get_tp_version - read the TP microcode version
* @adapter: the adapter
* @vers: where to place the version
@@ -3574,7 +3627,8 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
}
#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
- FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
+ FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_25G | \
+ FW_PORT_CAP_SPEED_40G | FW_PORT_CAP_SPEED_100G | \
FW_PORT_CAP_ANEG)
/**
@@ -7089,52 +7143,127 @@ int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
}
/**
- * t4_handle_fw_rpl - process a FW reply message
+ * t4_link_down_rc_str - return a string for a Link Down Reason Code
* @adap: the adapter
+ * @link_down_rc: Link Down Reason Code
+ *
+ * Returns a string representation of the Link Down Reason Code.
+ */
+static const char *t4_link_down_rc_str(unsigned char link_down_rc)
+{
+ static const char * const reason[] = {
+ "Link Down",
+ "Remote Fault",
+ "Auto-negotiation Failure",
+ "Reserved",
+ "Insufficient Airflow",
+ "Unable To Determine Reason",
+ "No RX Signal Detected",
+ "Reserved",
+ };
+
+ if (link_down_rc >= ARRAY_SIZE(reason))
+ return "Bad Reason Code";
+
+ return reason[link_down_rc];
+}
+
+/**
+ * t4_handle_get_port_info - process a FW reply message
+ * @pi: the port info
* @rpl: start of the FW message
*
- * Processes a FW message, such as link state change messages.
+ * Processes a GET_PORT_INFO FW reply message.
+ */
+void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
+{
+ const struct fw_port_cmd *p = (const void *)rpl;
+ struct adapter *adap = pi->adapter;
+
+ /* link/module state change message */
+ int speed = 0, fc = 0;
+ struct link_config *lc;
+ u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
+ int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
+ u32 mod = FW_PORT_CMD_MODTYPE_G(stat);
+
+ if (stat & FW_PORT_CMD_RXPAUSE_F)
+ fc |= PAUSE_RX;
+ if (stat & FW_PORT_CMD_TXPAUSE_F)
+ fc |= PAUSE_TX;
+ if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
+ speed = 100;
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
+ speed = 1000;
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
+ speed = 10000;
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
+ speed = 25000;
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
+ speed = 40000;
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
+ speed = 100000;
+
+ lc = &pi->link_cfg;
+
+ if (mod != pi->mod_type) {
+ pi->mod_type = mod;
+ t4_os_portmod_changed(adap, pi->port_id);
+ }
+ if (link_ok != lc->link_ok || speed != lc->speed ||
+ fc != lc->fc) { /* something changed */
+ if (!link_ok && lc->link_ok) {
+ unsigned char rc = FW_PORT_CMD_LINKDNRC_G(stat);
+
+ lc->link_down_rc = rc;
+ dev_warn(adap->pdev_dev,
+ "Port %d link down, reason: %s\n",
+ pi->port_id, t4_link_down_rc_str(rc));
+ }
+ lc->link_ok = link_ok;
+ lc->speed = speed;
+ lc->fc = fc;
+ lc->supported = be16_to_cpu(p->u.info.pcap);
+ lc->lp_advertising = be16_to_cpu(p->u.info.lpacap);
+ t4_os_link_changed(adap, pi->port_id, link_ok);
+ }
+}
+
+/**
+ * t4_handle_fw_rpl - process a FW reply message
+ * @adap: the adapter
+ * @rpl: start of the FW message
+ *
+ * Processes a FW message, such as link state change messages.
*/
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
u8 opcode = *(const u8 *)rpl;
- if (opcode == FW_PORT_CMD) { /* link/module state change message */
- int speed = 0, fc = 0;
- const struct fw_port_cmd *p = (void *)rpl;
+ /* This might be a port command ... this simplifies the following
+ * conditionals ... We can get away with pre-dereferencing
+ * action_to_len16 because it's in the first 16 bytes and all messages
+ * will be at least that long.
+ */
+ const struct fw_port_cmd *p = (const void *)rpl;
+ unsigned int action =
+ FW_PORT_CMD_ACTION_G(be32_to_cpu(p->action_to_len16));
+
+ if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
+ int i;
int chan = FW_PORT_CMD_PORTID_G(be32_to_cpu(p->op_to_portid));
- int port = adap->chan_map[chan];
- struct port_info *pi = adap2pinfo(adap, port);
- struct link_config *lc = &pi->link_cfg;
- u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
- int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
- u32 mod = FW_PORT_CMD_MODTYPE_G(stat);
-
- if (stat & FW_PORT_CMD_RXPAUSE_F)
- fc |= PAUSE_RX;
- if (stat & FW_PORT_CMD_TXPAUSE_F)
- fc |= PAUSE_TX;
- if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
- speed = 100;
- else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
- speed = 1000;
- else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
- speed = 10000;
- else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
- speed = 40000;
-
- if (link_ok != lc->link_ok || speed != lc->speed ||
- fc != lc->fc) { /* something changed */
- lc->link_ok = link_ok;
- lc->speed = speed;
- lc->fc = fc;
- lc->supported = be16_to_cpu(p->u.info.pcap);
- t4_os_link_changed(adap, port, link_ok);
- }
- if (mod != pi->mod_type) {
- pi->mod_type = mod;
- t4_os_portmod_changed(adap, port);
+ struct port_info *pi = NULL;
+
+ for_each_port(adap, i) {
+ pi = adap2pinfo(adap, i);
+ if (pi->tx_chan == chan)
+ break;
}
+
+ t4_handle_get_port_info(pi, rpl);
+ } else {
+ dev_warn(adap->pdev_dev, "Unknown firmware reply %d\n", opcode);
+ return -EINVAL;
}
return 0;
}
@@ -7161,6 +7290,7 @@ static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
static void init_link_config(struct link_config *lc, unsigned int caps)
{
lc->supported = caps;
+ lc->lp_advertising = 0;
lc->requested_speed = 0;
lc->speed = 0;
lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
@@ -7654,61 +7784,74 @@ int t4_init_rss_mode(struct adapter *adap, int mbox)
return 0;
}
-int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
+/**
+ * t4_init_portinfo - allocate a virtual interface and initialize port_info
+ * @pi: the port_info
+ * @mbox: mailbox to use for the FW command
+ * @port: physical port associated with the VI
+ * @pf: the PF owning the VI
+ * @vf: the VF owning the VI
+ * @mac: the MAC address of the VI
+ *
+ * Allocates a virtual interface for the given physical port. If @mac is
+ * not %NULL it contains the MAC address of the VI as assigned by FW.
+ * @mac should be large enough to hold an Ethernet address.
+ * Returns < 0 on error.
+ */
+int t4_init_portinfo(struct port_info *pi, int mbox,
+ int port, int pf, int vf, u8 mac[])
{
- u8 addr[6];
- int ret, i, j = 0;
+ int ret;
struct fw_port_cmd c;
- struct fw_rss_vi_config_cmd rvc;
+ unsigned int rss_size;
memset(&c, 0, sizeof(c));
- memset(&rvc, 0, sizeof(rvc));
+ c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
+ FW_CMD_REQUEST_F | FW_CMD_READ_F |
+ FW_PORT_CMD_PORTID_V(port));
+ c.action_to_len16 = cpu_to_be32(
+ FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
+ FW_LEN16(c));
+ ret = t4_wr_mbox(pi->adapter, mbox, &c, sizeof(c), &c);
+ if (ret)
+ return ret;
+
+ ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac, &rss_size);
+ if (ret < 0)
+ return ret;
+
+ pi->viid = ret;
+ pi->tx_chan = port;
+ pi->lport = port;
+ pi->rss_size = rss_size;
+
+ ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
+ pi->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ?
+ FW_PORT_CMD_MDIOADDR_G(ret) : -1;
+ pi->port_type = FW_PORT_CMD_PTYPE_G(ret);
+ pi->mod_type = FW_PORT_MOD_TYPE_NA;
+
+ init_link_config(&pi->link_cfg, be16_to_cpu(c.u.info.pcap));
+ return 0;
+}
+
+int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
+{
+ u8 addr[6];
+ int ret, i, j = 0;
for_each_port(adap, i) {
- unsigned int rss_size;
- struct port_info *p = adap2pinfo(adap, i);
+ struct port_info *pi = adap2pinfo(adap, i);
while ((adap->params.portvec & (1 << j)) == 0)
j++;
- c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
- FW_CMD_REQUEST_F | FW_CMD_READ_F |
- FW_PORT_CMD_PORTID_V(j));
- c.action_to_len16 = cpu_to_be32(
- FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
- FW_LEN16(c));
- ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+ ret = t4_init_portinfo(pi, mbox, j, pf, vf, addr);
if (ret)
return ret;
- ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
- if (ret < 0)
- return ret;
-
- p->viid = ret;
- p->tx_chan = j;
- p->lport = j;
- p->rss_size = rss_size;
memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
adap->port[i]->dev_port = j;
-
- ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
- p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ?
- FW_PORT_CMD_MDIOADDR_G(ret) : -1;
- p->port_type = FW_PORT_CMD_PTYPE_G(ret);
- p->mod_type = FW_PORT_MOD_TYPE_NA;
-
- rvc.op_to_viid =
- cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
- FW_CMD_REQUEST_F | FW_CMD_READ_F |
- FW_RSS_VI_CONFIG_CMD_VIID(p->viid));
- rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
- ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
- if (ret)
- return ret;
- p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
-
- init_link_config(&p->link_cfg, be16_to_cpu(c.u.info.pcap));
j++;
}
return 0;
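
Note the new convention in t4_wr_mbox_meat_timeout() above: a negative timeout requests a non-sleeping (busy-wait) poll for |timeout| milliseconds, overriding sleep_ok. A caller-side sketch (arguments other than the timeout are illustrative):

	/* process context: may msleep() between polls, up to 5 seconds */
	ret = t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, true, 5000);

	/* atomic context: same 5 second budget, but udelay()-based spinning */
	ret = t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, true, -5000);
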
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index 2fc60e83a7a1..7f59ca458431 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -220,6 +220,13 @@ enum {
FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC),
FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS),
+ /* Location of bootstrap firmware image in FLASH.
+ */
+ FLASH_FWBOOTSTRAP_START_SEC = 27,
+ FLASH_FWBOOTSTRAP_NSECS = 1,
+ FLASH_FWBOOTSTRAP_START = FLASH_START(FLASH_FWBOOTSTRAP_START_SEC),
+ FLASH_FWBOOTSTRAP_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FWBOOTSTRAP_NSECS),
+
/*
* iSCSI persistent/crash information.
*/
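
FLASH_START()/FLASH_MAX_SIZE() scale sector counts by the serial-flash sector size (SF_SEC_SIZE elsewhere in this header), putting the new bootstrap region at byte offset 0x1b0000. A quick self-checking sketch, assuming the 64KB sector size:

	#include <assert.h>

	#define SF_SEC_SIZE		(64 * 1024)	/* serial flash sector */
	#define FLASH_START(s)		((s) * SF_SEC_SIZE)
	#define FLASH_MAX_SIZE(n)	((n) * SF_SEC_SIZE)

	int main(void)
	{
		/* bootstrap firmware: sector 27, one sector long */
		assert(FLASH_START(27) == 0x1b0000);
		assert(FLASH_MAX_SIZE(1) == 0x10000);
		return 0;
	}
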
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index 80417fc564d4..e0ebe1378cb2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -104,6 +104,8 @@ enum {
enum CPL_error {
CPL_ERR_NONE = 0,
+ CPL_ERR_TCAM_PARITY = 1,
+ CPL_ERR_TCAM_MISS = 2,
CPL_ERR_TCAM_FULL = 3,
CPL_ERR_BAD_LENGTH = 15,
CPL_ERR_BAD_ROUTE = 18,
@@ -1392,6 +1394,10 @@ struct ulp_mem_io {
#define T5_ULP_MEMIO_ORDER_V(x) ((x) << T5_ULP_MEMIO_ORDER_S)
#define T5_ULP_MEMIO_ORDER_F T5_ULP_MEMIO_ORDER_V(1U)
+#define T5_ULP_MEMIO_FID_S 4
+#define T5_ULP_MEMIO_FID_M 0x7ff
+#define T5_ULP_MEMIO_FID_V(x) ((x) << T5_ULP_MEMIO_FID_S)
+
/* ulp_mem_io.lock_addr fields */
#define ULP_MEMIO_ADDR_S 0
#define ULP_MEMIO_ADDR_V(x) ((x) << ULP_MEMIO_ADDR_S)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index a2cdfc1261dc..50812a1d67bd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -144,6 +144,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN
CH_PCI_ID_TABLE_FENTRY(0x5015), /* T502-bt */
CH_PCI_ID_TABLE_FENTRY(0x5016), /* T580-OCP-SO */
CH_PCI_ID_TABLE_FENTRY(0x5017), /* T520-OCP-SO */
+ CH_PCI_ID_TABLE_FENTRY(0x5018), /* T540-BT */
CH_PCI_ID_TABLE_FENTRY(0x5080), /* Custom T540-cr */
CH_PCI_ID_TABLE_FENTRY(0x5081), /* Custom T540-LL-cr */
CH_PCI_ID_TABLE_FENTRY(0x5082), /* Custom T504-cr */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 7ad6d4e75b2a..30507d44422c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -2249,22 +2249,28 @@ struct fw_acl_vlan_cmd {
enum fw_port_cap {
FW_PORT_CAP_SPEED_100M = 0x0001,
FW_PORT_CAP_SPEED_1G = 0x0002,
- FW_PORT_CAP_SPEED_2_5G = 0x0004,
+ FW_PORT_CAP_SPEED_25G = 0x0004,
FW_PORT_CAP_SPEED_10G = 0x0008,
FW_PORT_CAP_SPEED_40G = 0x0010,
FW_PORT_CAP_SPEED_100G = 0x0020,
FW_PORT_CAP_FC_RX = 0x0040,
FW_PORT_CAP_FC_TX = 0x0080,
FW_PORT_CAP_ANEG = 0x0100,
- FW_PORT_CAP_MDI_0 = 0x0200,
- FW_PORT_CAP_MDI_1 = 0x0400,
- FW_PORT_CAP_BEAN = 0x0800,
- FW_PORT_CAP_PMA_LPBK = 0x1000,
- FW_PORT_CAP_PCS_LPBK = 0x2000,
- FW_PORT_CAP_PHYXS_LPBK = 0x4000,
- FW_PORT_CAP_FAR_END_LPBK = 0x8000,
+ FW_PORT_CAP_MDIX = 0x0200,
+ FW_PORT_CAP_MDIAUTO = 0x0400,
+ FW_PORT_CAP_FEC = 0x0800,
+ FW_PORT_CAP_TECHKR = 0x1000,
+ FW_PORT_CAP_TECHKX4 = 0x2000,
+ FW_PORT_CAP_802_3_PAUSE = 0x4000,
+ FW_PORT_CAP_802_3_ASM_DIR = 0x8000,
};
+#define FW_PORT_CAP_SPEED_S 0
+#define FW_PORT_CAP_SPEED_M 0x3f
+#define FW_PORT_CAP_SPEED_V(x) ((x) << FW_PORT_CAP_SPEED_S)
+#define FW_PORT_CAP_SPEED_G(x) \
+ (((x) >> FW_PORT_CAP_SPEED_S) & FW_PORT_CAP_SPEED_M)
+
enum fw_port_mdi {
FW_PORT_CAP_MDI_UNCHANGED,
FW_PORT_CAP_MDI_AUTO,
@@ -2376,7 +2382,8 @@ struct fw_port_cmd {
__u8 cbllen;
__u8 auxlinfo;
__u8 dcbxdis_pkd;
- __u8 r8_lo[3];
+ __u8 r8_lo;
+ __be16 lpacap;
__be64 r9;
} info;
struct fw_port_diags {
@@ -2510,6 +2517,11 @@ struct fw_port_cmd {
#define FW_PORT_CMD_PTYPE_G(x) \
(((x) >> FW_PORT_CMD_PTYPE_S) & FW_PORT_CMD_PTYPE_M)
+#define FW_PORT_CMD_LINKDNRC_S 5
+#define FW_PORT_CMD_LINKDNRC_M 0x7
+#define FW_PORT_CMD_LINKDNRC_G(x) \
+ (((x) >> FW_PORT_CMD_LINKDNRC_S) & FW_PORT_CMD_LINKDNRC_M)
+
#define FW_PORT_CMD_MODTYPE_S 0
#define FW_PORT_CMD_MODTYPE_M 0x1f
#define FW_PORT_CMD_MODTYPE_V(x) ((x) << FW_PORT_CMD_MODTYPE_S)
@@ -2550,6 +2562,11 @@ enum fw_port_type {
FW_PORT_TYPE_QSA,
FW_PORT_TYPE_QSFP,
FW_PORT_TYPE_BP40_BA,
+ FW_PORT_TYPE_KR4_100G,
+ FW_PORT_TYPE_CR4_QSFP,
+ FW_PORT_TYPE_CR_QSFP,
+ FW_PORT_TYPE_CR2_QSFP,
+ FW_PORT_TYPE_SFP28,
FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_M
};
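
With 25G and 100G now sharing the same low six-bit speed field, the new FW_PORT_CAP_SPEED_{S,M,V,G} accessors let callers mask the whole field at once, which is exactly what the reworked is_x_10g_port() in cxgb4_main.c does. A self-contained check (constants copied from the enum above):

	#include <assert.h>

	#define FW_PORT_CAP_SPEED_100M	0x0001
	#define FW_PORT_CAP_SPEED_1G	0x0002
	#define FW_PORT_CAP_SPEED_25G	0x0004
	#define FW_PORT_CAP_SPEED_10G	0x0008
	#define FW_PORT_CAP_SPEED_S	0
	#define FW_PORT_CAP_SPEED_M	0x3f
	#define FW_PORT_CAP_SPEED_G(x) \
		(((x) >> FW_PORT_CAP_SPEED_S) & FW_PORT_CAP_SPEED_M)

	int main(void)
	{
		unsigned int supported = FW_PORT_CAP_SPEED_1G |
					 FW_PORT_CAP_SPEED_25G;
		unsigned int speeds = FW_PORT_CAP_SPEED_G(supported);
		unsigned int high = speeds &
			~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G);

		assert(high == FW_PORT_CAP_SPEED_25G); /* 25G is a "high speed" */
		return 0;
	}
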
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index c4b262ca7d43..2accab386323 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -36,8 +36,8 @@
#define __T4FW_VERSION_H__
#define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x0E
-#define T4FW_VERSION_MICRO 0x04
+#define T4FW_VERSION_MINOR 0x0F
+#define T4FW_VERSION_MICRO 0x25
#define T4FW_VERSION_BUILD 0x00
#define T4FW_MIN_VERSION_MAJOR 0x01
@@ -45,8 +45,8 @@
#define T4FW_MIN_VERSION_MICRO 0x00
#define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x0E
-#define T5FW_VERSION_MICRO 0x04
+#define T5FW_VERSION_MINOR 0x0F
+#define T5FW_VERSION_MICRO 0x25
#define T5FW_VERSION_BUILD 0x00
#define T5FW_MIN_VERSION_MAJOR 0x00
@@ -54,8 +54,8 @@
#define T5FW_MIN_VERSION_MICRO 0x00
#define T6FW_VERSION_MAJOR 0x01
-#define T6FW_VERSION_MINOR 0x0E
-#define T6FW_VERSION_MICRO 0x04
+#define T6FW_VERSION_MINOR 0x0F
+#define T6FW_VERSION_MICRO 0x25
#define T6FW_VERSION_BUILD 0x00
#define T6FW_MIN_VERSION_MAJOR 0x00
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 4a707c32d76f..109bc630408b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -353,6 +353,10 @@ struct hash_mac_addr {
u8 addr[ETH_ALEN];
};
+struct mbox_list {
+ struct list_head list;
+};
+
/*
* Per-"adapter" (Virtual Function) information.
*/
@@ -387,6 +391,14 @@ struct adapter {
/* various locks */
spinlock_t stats_lock;
+ /* lock for mailbox cmd list */
+ spinlock_t mbox_lock;
+ struct mbox_list mlist;
+
+ /* support for mailbox command/reply logging */
+#define T4VF_OS_LOG_MBOX_CMDS 256
+ struct mbox_cmd_log *mbox_log;
+
/* list of MAC addresses in MPS Hash */
struct list_head mac_hlist;
};
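
mbox_lock and mlist form a FIFO of mailbox waiters so that concurrent VF mailbox commands are issued in arrival order; the enqueue/dequeue itself lives in t4vf_hw.c, which this excerpt doesn't show. A hedged sketch of the intended pattern using the kernel list API:

	struct mbox_list entry;

	spin_lock(&adapter->mbox_lock);
	list_add_tail(&entry.list, &adapter->mlist.list);
	spin_unlock(&adapter->mbox_lock);

	/* ... wait until &entry.list is first in adapter->mlist.list, then
	 * own the mailbox, issue the command, and finally dequeue:
	 */
	spin_lock(&adapter->mbox_lock);
	list_del(&entry.list);
	spin_unlock(&adapter->mbox_lock);
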
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 1cc8a7a69457..e116bb8d1729 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -74,7 +74,8 @@ static int dflt_msg_enable = DFLT_MSG_ENABLE;
module_param(dflt_msg_enable, int, 0644);
MODULE_PARM_DESC(dflt_msg_enable,
- "default adapter ethtool message level bitmap");
+ "default adapter ethtool message level bitmap, "
+ "deprecated parameter");
/*
* The driver uses the best interrupt scheme available on a platform in the
@@ -936,12 +937,8 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
struct port_info *pi = netdev_priv(dev);
- if (!(dev->flags & IFF_PROMISC)) {
- __dev_uc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync);
- if (!(dev->flags & IFF_ALLMULTI))
- __dev_mc_sync(dev, cxgb4vf_mac_sync,
- cxgb4vf_mac_unsync);
- }
+ __dev_uc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync);
+ __dev_mc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync);
return t4vf_set_rxmode(pi->adapter, pi->viid, -1,
(dev->flags & IFF_PROMISC) != 0,
(dev->flags & IFF_ALLMULTI) != 0,
@@ -1204,105 +1201,187 @@ static void cxgb4vf_poll_controller(struct net_device *dev)
* state of the port to which we're linked.
*/
-static unsigned int t4vf_from_fw_linkcaps(enum fw_port_type type,
- unsigned int caps)
-{
- unsigned int v = 0;
-
- if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
- type == FW_PORT_TYPE_BT_XAUI) {
- v |= SUPPORTED_TP;
- if (caps & FW_PORT_CAP_SPEED_100M)
- v |= SUPPORTED_100baseT_Full;
- if (caps & FW_PORT_CAP_SPEED_1G)
- v |= SUPPORTED_1000baseT_Full;
- if (caps & FW_PORT_CAP_SPEED_10G)
- v |= SUPPORTED_10000baseT_Full;
- } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
- v |= SUPPORTED_Backplane;
- if (caps & FW_PORT_CAP_SPEED_1G)
- v |= SUPPORTED_1000baseKX_Full;
- if (caps & FW_PORT_CAP_SPEED_10G)
- v |= SUPPORTED_10000baseKX4_Full;
- } else if (type == FW_PORT_TYPE_KR)
- v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
- else if (type == FW_PORT_TYPE_BP_AP)
- v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
- SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
- else if (type == FW_PORT_TYPE_BP4_AP)
- v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
- SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
- SUPPORTED_10000baseKX4_Full;
- else if (type == FW_PORT_TYPE_FIBER_XFI ||
- type == FW_PORT_TYPE_FIBER_XAUI ||
- type == FW_PORT_TYPE_SFP ||
- type == FW_PORT_TYPE_QSFP_10G ||
- type == FW_PORT_TYPE_QSA) {
- v |= SUPPORTED_FIBRE;
- if (caps & FW_PORT_CAP_SPEED_1G)
- v |= SUPPORTED_1000baseT_Full;
- if (caps & FW_PORT_CAP_SPEED_10G)
- v |= SUPPORTED_10000baseT_Full;
- } else if (type == FW_PORT_TYPE_BP40_BA ||
- type == FW_PORT_TYPE_QSFP) {
- v |= SUPPORTED_40000baseSR4_Full;
- v |= SUPPORTED_FIBRE;
- }
-
- if (caps & FW_PORT_CAP_ANEG)
- v |= SUPPORTED_Autoneg;
- return v;
-}
-
-static int cxgb4vf_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- const struct port_info *p = netdev_priv(dev);
-
- if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
- p->port_type == FW_PORT_TYPE_BT_XFI ||
- p->port_type == FW_PORT_TYPE_BT_XAUI)
- cmd->port = PORT_TP;
- else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
- p->port_type == FW_PORT_TYPE_FIBER_XAUI)
- cmd->port = PORT_FIBRE;
- else if (p->port_type == FW_PORT_TYPE_SFP ||
- p->port_type == FW_PORT_TYPE_QSFP_10G ||
- p->port_type == FW_PORT_TYPE_QSA ||
- p->port_type == FW_PORT_TYPE_QSFP) {
- if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
- p->mod_type == FW_PORT_MOD_TYPE_SR ||
- p->mod_type == FW_PORT_MOD_TYPE_ER ||
- p->mod_type == FW_PORT_MOD_TYPE_LRM)
- cmd->port = PORT_FIBRE;
- else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
- p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
- cmd->port = PORT_DA;
+/**
+ * from_fw_port_mod_type - translate Firmware Port/Module type to Ethtool
+ * @port_type: Firmware Port Type
+ * @mod_type: Firmware Module Type
+ *
+ * Translate Firmware Port/Module type to Ethtool Port Type.
+ */
+static int from_fw_port_mod_type(enum fw_port_type port_type,
+ enum fw_port_module_type mod_type)
+{
+ if (port_type == FW_PORT_TYPE_BT_SGMII ||
+ port_type == FW_PORT_TYPE_BT_XFI ||
+ port_type == FW_PORT_TYPE_BT_XAUI) {
+ return PORT_TP;
+ } else if (port_type == FW_PORT_TYPE_FIBER_XFI ||
+ port_type == FW_PORT_TYPE_FIBER_XAUI) {
+ return PORT_FIBRE;
+ } else if (port_type == FW_PORT_TYPE_SFP ||
+ port_type == FW_PORT_TYPE_QSFP_10G ||
+ port_type == FW_PORT_TYPE_QSA ||
+ port_type == FW_PORT_TYPE_QSFP) {
+ if (mod_type == FW_PORT_MOD_TYPE_LR ||
+ mod_type == FW_PORT_MOD_TYPE_SR ||
+ mod_type == FW_PORT_MOD_TYPE_ER ||
+ mod_type == FW_PORT_MOD_TYPE_LRM)
+ return PORT_FIBRE;
+ else if (mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
+ mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
+ return PORT_DA;
else
- cmd->port = PORT_OTHER;
- } else
- cmd->port = PORT_OTHER;
+ return PORT_OTHER;
+ }
+
+ return PORT_OTHER;
+}
+
+/**
+ * fw_caps_to_lmm - translate Firmware to ethtool Link Mode Mask
+ * @port_type: Firmware Port Type
+ * @fw_caps: Firmware Port Capabilities
+ * @link_mode_mask: ethtool Link Mode Mask
+ *
+ * Translate a Firmware Port Capabilities specification to an ethtool
+ * Link Mode Mask.
+ */
+static void fw_caps_to_lmm(enum fw_port_type port_type,
+ unsigned int fw_caps,
+ unsigned long *link_mode_mask)
+{
+ #define SET_LMM(__lmm_name) __set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name\
+ ## _BIT, link_mode_mask)
+
+ #define FW_CAPS_TO_LMM(__fw_name, __lmm_name) \
+ do { \
+ if (fw_caps & FW_PORT_CAP_ ## __fw_name) \
+ SET_LMM(__lmm_name); \
+ } while (0)
+
+ switch (port_type) {
+ case FW_PORT_TYPE_BT_SGMII:
+ case FW_PORT_TYPE_BT_XFI:
+ case FW_PORT_TYPE_BT_XAUI:
+ SET_LMM(TP);
+ FW_CAPS_TO_LMM(SPEED_100M, 100baseT_Full);
+ FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
+ FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
+ break;
+
+ case FW_PORT_TYPE_KX4:
+ case FW_PORT_TYPE_KX:
+ SET_LMM(Backplane);
+ FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
+ FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full);
+ break;
+
+ case FW_PORT_TYPE_KR:
+ SET_LMM(Backplane);
+ SET_LMM(10000baseKR_Full);
+ break;
+
+ case FW_PORT_TYPE_BP_AP:
+ SET_LMM(Backplane);
+ SET_LMM(10000baseR_FEC);
+ SET_LMM(10000baseKR_Full);
+ SET_LMM(1000baseKX_Full);
+ break;
+
+ case FW_PORT_TYPE_BP4_AP:
+ SET_LMM(Backplane);
+ SET_LMM(10000baseR_FEC);
+ SET_LMM(10000baseKR_Full);
+ SET_LMM(1000baseKX_Full);
+ SET_LMM(10000baseKX4_Full);
+ break;
+
+ case FW_PORT_TYPE_FIBER_XFI:
+ case FW_PORT_TYPE_FIBER_XAUI:
+ case FW_PORT_TYPE_SFP:
+ case FW_PORT_TYPE_QSFP_10G:
+ case FW_PORT_TYPE_QSA:
+ SET_LMM(FIBRE);
+ FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
+ FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
+ break;
- if (p->mdio_addr >= 0) {
- cmd->phy_address = p->mdio_addr;
- cmd->transceiver = XCVR_EXTERNAL;
- cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
- MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
+ case FW_PORT_TYPE_BP40_BA:
+ case FW_PORT_TYPE_QSFP:
+ SET_LMM(FIBRE);
+ SET_LMM(40000baseSR4_Full);
+ break;
+
+ case FW_PORT_TYPE_CR_QSFP:
+ case FW_PORT_TYPE_SFP28:
+ SET_LMM(FIBRE);
+ SET_LMM(25000baseCR_Full);
+ break;
+
+ case FW_PORT_TYPE_KR4_100G:
+ case FW_PORT_TYPE_CR4_QSFP:
+ SET_LMM(FIBRE);
+ SET_LMM(100000baseCR4_Full);
+ break;
+
+ default:
+ break;
+ }
+
+ FW_CAPS_TO_LMM(ANEG, Autoneg);
+ FW_CAPS_TO_LMM(802_3_PAUSE, Pause);
+ FW_CAPS_TO_LMM(802_3_ASM_DIR, Asym_Pause);
+
+ #undef FW_CAPS_TO_LMM
+ #undef SET_LMM
+}
+
+static int cxgb4vf_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings
+ *link_ksettings)
+{
+ const struct port_info *pi = netdev_priv(dev);
+ struct ethtool_link_settings *base = &link_ksettings->base;
+
+ ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
+ ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
+ ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising);
+
+ base->port = from_fw_port_mod_type(pi->port_type, pi->mod_type);
+
+ if (pi->mdio_addr >= 0) {
+ base->phy_address = pi->mdio_addr;
+ base->mdio_support = (pi->port_type == FW_PORT_TYPE_BT_SGMII
+ ? ETH_MDIO_SUPPORTS_C22
+ : ETH_MDIO_SUPPORTS_C45);
} else {
- cmd->phy_address = 0; /* not really, but no better option */
- cmd->transceiver = XCVR_INTERNAL;
- cmd->mdio_support = 0;
- }
-
- cmd->supported = t4vf_from_fw_linkcaps(p->port_type,
- p->link_cfg.supported);
- cmd->advertising = t4vf_from_fw_linkcaps(p->port_type,
- p->link_cfg.advertising);
- ethtool_cmd_speed_set(cmd,
- netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
- cmd->duplex = DUPLEX_FULL;
- cmd->autoneg = p->link_cfg.autoneg;
- cmd->maxtxpkt = 0;
- cmd->maxrxpkt = 0;
+ base->phy_address = 255;
+ base->mdio_support = 0;
+ }
+
+ fw_caps_to_lmm(pi->port_type, pi->link_cfg.supported,
+ link_ksettings->link_modes.supported);
+ fw_caps_to_lmm(pi->port_type, pi->link_cfg.advertising,
+ link_ksettings->link_modes.advertising);
+ fw_caps_to_lmm(pi->port_type, pi->link_cfg.lp_advertising,
+ link_ksettings->link_modes.lp_advertising);
+
+ if (netif_carrier_ok(dev)) {
+ base->speed = pi->link_cfg.speed;
+ base->duplex = DUPLEX_FULL;
+ } else {
+ base->speed = SPEED_UNKNOWN;
+ base->duplex = DUPLEX_UNKNOWN;
+ }
+
+ base->autoneg = pi->link_cfg.autoneg;
+ if (pi->link_cfg.supported & FW_PORT_CAP_ANEG)
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ supported, Autoneg);
+ if (pi->link_cfg.autoneg)
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ advertising, Autoneg);
+
return 0;
}
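
For reference, a minimal sketch of the link-mode-mask technique used by fw_caps_to_lmm() above: ethtool link modes are bit indices into a bitmap, so each mode is recorded with __set_bit() on an ETHTOOL_LINK_MODE_*_BIT rather than OR-ing SUPPORTED_* flags into a 32-bit word. The EX_CAP_* capability bits below are made up for illustration; only the __set_bit() calls reflect the real API.

	/* Illustration only: EX_CAP_* are hypothetical capability flags. */
	#define EX_CAP_1G	0x1
	#define EX_CAP_10G	0x2

	static void ex_caps_to_lmm(unsigned int caps, unsigned long *lmm)
	{
		if (caps & EX_CAP_1G)
			__set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, lmm);
		if (caps & EX_CAP_10G)
			__set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, lmm);
	}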
@@ -1678,7 +1757,7 @@ static void cxgb4vf_get_wol(struct net_device *dev,
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
static const struct ethtool_ops cxgb4vf_ethtool_ops = {
- .get_settings = cxgb4vf_get_settings,
+ .get_link_ksettings = cxgb4vf_get_link_ksettings,
.get_drvinfo = cxgb4vf_get_drvinfo,
.get_msglevel = cxgb4vf_get_msglevel,
.set_msglevel = cxgb4vf_set_msglevel,
@@ -1703,6 +1782,105 @@ static const struct ethtool_ops cxgb4vf_ethtool_ops = {
*/
/*
+ * Show Firmware Mailbox Command/Reply Log
+ *
+ * Note that we don't do any locking when dumping the Firmware Mailbox Log so
+ * it's possible that we can catch things during a log update and therefore
+ * see partially corrupted log entries. But it's probably Good Enough(tm).
+ * If we ever decide that we want to make sure that we're dumping a coherent
+ * log, we'd need to perform locking in the mailbox logging and in
+ * mboxlog_open() where we'd need to grab the entire mailbox log in one go
+ * like we do for the Firmware Device Log. But as stated above, meh ...
+ */
+static int mboxlog_show(struct seq_file *seq, void *v)
+{
+ struct adapter *adapter = seq->private;
+ struct mbox_cmd_log *log = adapter->mbox_log;
+ struct mbox_cmd *entry;
+ int entry_idx, i;
+
+ if (v == SEQ_START_TOKEN) {
+ seq_printf(seq,
+ "%10s %15s %5s %5s %s\n",
+ "Seq#", "Tstamp", "Atime", "Etime",
+ "Command/Reply");
+ return 0;
+ }
+
+ entry_idx = log->cursor + ((uintptr_t)v - 2);
+ if (entry_idx >= log->size)
+ entry_idx -= log->size;
+ entry = mbox_cmd_log_entry(log, entry_idx);
+
+ /* skip over unused entries */
+ if (entry->timestamp == 0)
+ return 0;
+
+ seq_printf(seq, "%10u %15llu %5d %5d",
+ entry->seqno, entry->timestamp,
+ entry->access, entry->execute);
+ for (i = 0; i < MBOX_LEN / 8; i++) {
+ u64 flit = entry->cmd[i];
+ u32 hi = (u32)(flit >> 32);
+ u32 lo = (u32)flit;
+
+ seq_printf(seq, " %08x %08x", hi, lo);
+ }
+ seq_puts(seq, "\n");
+ return 0;
+}
+
+static inline void *mboxlog_get_idx(struct seq_file *seq, loff_t pos)
+{
+ struct adapter *adapter = seq->private;
+ struct mbox_cmd_log *log = adapter->mbox_log;
+
+ return ((pos <= log->size) ? (void *)(uintptr_t)(pos + 1) : NULL);
+}
+
+static void *mboxlog_start(struct seq_file *seq, loff_t *pos)
+{
+ return *pos ? mboxlog_get_idx(seq, *pos) : SEQ_START_TOKEN;
+}
+
+static void *mboxlog_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ ++*pos;
+ return mboxlog_get_idx(seq, *pos);
+}
+
+static void mboxlog_stop(struct seq_file *seq, void *v)
+{
+}
+
+static const struct seq_operations mboxlog_seq_ops = {
+ .start = mboxlog_start,
+ .next = mboxlog_next,
+ .stop = mboxlog_stop,
+ .show = mboxlog_show
+};
+
+static int mboxlog_open(struct inode *inode, struct file *file)
+{
+ int res = seq_open(file, &mboxlog_seq_ops);
+
+ if (!res) {
+ struct seq_file *seq = file->private_data;
+
+ seq->private = inode->i_private;
+ }
+ return res;
+}
+
+static const struct file_operations mboxlog_fops = {
+ .owner = THIS_MODULE,
+ .open = mboxlog_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+/*
* Show SGE Queue Set information. We display QPL Queues Sets per line.
*/
#define QPL 4
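
For reference, the seq_file iterator above encodes the log position in the iterator pointer itself: *pos == 0 yields SEQ_START_TOKEN for the header line, and any later position is returned as (void *)(pos + 1) so a valid iterator is never NULL. mboxlog_show() reverses the encoding with ((uintptr_t)v - 2) and adds the ring cursor, wrapping modulo the log size. A worked sketch of the wrap arithmetic, with hypothetical values:

	/* Hypothetical log of 4 entries; next write (oldest entry) at 1. */
	unsigned int size = 4, cursor = 1;
	unsigned int seq_idx = 3;		/* reader wants the 4th entry */
	unsigned int entry_idx = cursor + seq_idx;	/* 4 */

	if (entry_idx >= size)
		entry_idx -= size;		/* wraps to 0, the newest slot */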
@@ -2121,6 +2299,7 @@ struct cxgb4vf_debugfs_entry {
};
static struct cxgb4vf_debugfs_entry debugfs_files[] = {
+ { "mboxlog", S_IRUGO, &mboxlog_fops },
{ "sge_qinfo", S_IRUGO, &sge_qinfo_debugfs_fops },
{ "sge_qstats", S_IRUGO, &sge_qstats_proc_fops },
{ "resources", S_IRUGO, &resources_proc_fops },
@@ -2663,10 +2842,22 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
adapter->pdev = pdev;
adapter->pdev_dev = &pdev->dev;
+ adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
+ (sizeof(struct mbox_cmd) *
+ T4VF_OS_LOG_MBOX_CMDS),
+ GFP_KERNEL);
+ if (!adapter->mbox_log) {
+ err = -ENOMEM;
+ goto err_free_adapter;
+ }
+ adapter->mbox_log->size = T4VF_OS_LOG_MBOX_CMDS;
+
/*
* Initialize SMP data synchronization resources.
*/
spin_lock_init(&adapter->stats_lock);
+ spin_lock_init(&adapter->mbox_lock);
+ INIT_LIST_HEAD(&adapter->mlist.list);
/*
* Map our I/O registers in BAR0.
@@ -2912,6 +3103,7 @@ err_unmap_bar0:
iounmap(adapter->regs);
err_free_adapter:
+ kfree(adapter->mbox_log);
kfree(adapter);
err_release_regions:
@@ -2981,6 +3173,7 @@ static void cxgb4vf_pci_remove(struct pci_dev *pdev)
iounmap(adapter->regs);
if (!is_t4(adapter->params.chip))
iounmap(adapter->bar2);
+ kfree(adapter->mbox_log);
kfree(adapter);
}
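
For reference, the mailbox log allocated in cxgb4vf_pci_probe() above uses the common header-plus-trailing-array layout: a single kzalloc() covers struct mbox_cmd_log plus T4VF_OS_LOG_MBOX_CMDS entries, and the entries begin at &log[1] (see mbox_cmd_log_entry() in t4vf_common.h below). A minimal standalone sketch of the pattern, with illustrative names:

	#define EX_NENTRIES	256	/* hypothetical log depth */

	struct ex_log	{ unsigned int size, cursor; /* entries follow */ };
	struct ex_entry	{ u64 payload; };

	struct ex_log *log = kzalloc(sizeof(*log) +
				     EX_NENTRIES * sizeof(struct ex_entry),
				     GFP_KERNEL);
	if (log) {
		struct ex_entry *entries = (struct ex_entry *)&log[1];

		log->size = EX_NENTRIES;
		entries[0].payload = 0;	/* already zeroed by kzalloc() */
	}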
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 1ccd282949a5..c8fd4f8fe1fa 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1188,7 +1188,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
/* Discard the packet if the length is greater than mtu */
max_pkt_len = ETH_HLEN + dev->mtu;
- if (skb_vlan_tag_present(skb))
+ if (skb_vlan_tagged(skb))
max_pkt_len += VLAN_HLEN;
if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len)))
goto out_free;
@@ -1448,7 +1448,7 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
* the new TX descriptors and return success.
*/
txq_advance(&txq->q, ndesc);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
ring_tx_db(adapter, &txq->q, ndesc);
return NETDEV_TX_OK;
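
The dev->trans_start = jiffies conversions in this patch (and in several drivers below) track a core change in this cycle: the transmit timestamp moved from struct net_device to the per-queue struct netdev_queue, with netif_trans_update() as the accessor. Roughly, the helper amounts to the following (a sketch of the 4.7-era definition, from memory):

	static inline void netif_trans_update(struct net_device *dev)
	{
		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

		if (txq->trans_start != jiffies)
			txq->trans_start = jiffies;
	}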
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 9b40a85cc1e4..17a2bbcf93f0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -36,6 +36,7 @@
#ifndef __T4VF_COMMON_H__
#define __T4VF_COMMON_H__
+#include "../cxgb4/t4_hw.h"
#include "../cxgb4/t4fw_api.h"
#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
@@ -106,8 +107,9 @@ struct t4vf_port_stats {
struct link_config {
unsigned int supported; /* link capabilities */
unsigned int advertising; /* advertised capabilities */
- unsigned short requested_speed; /* speed user has requested */
- unsigned short speed; /* actual link speed */
+ unsigned short lp_advertising; /* peer advertised capabilities */
+ unsigned int requested_speed; /* speed user has requested */
+ unsigned int speed; /* actual link speed */
unsigned char requested_fc; /* flow control user has requested */
unsigned char fc; /* actual link flow control */
unsigned char autoneg; /* autonegotiating? */
@@ -227,6 +229,34 @@ struct adapter_params {
u8 nports; /* # of Ethernet "ports" */
};
+/* Firmware Mailbox Command/Reply log. All values are in Host-Endian format.
+ * The access and execute times are signed in order to accommodate negative
+ * error returns.
+ */
+struct mbox_cmd {
+ u64 cmd[MBOX_LEN / 8]; /* a Firmware Mailbox Command/Reply */
+ u64 timestamp; /* OS-dependent timestamp */
+ u32 seqno; /* sequence number */
+ s16 access; /* time (ms) to access mailbox */
+ s16 execute; /* time (ms) to execute */
+};
+
+struct mbox_cmd_log {
+ unsigned int size; /* number of entries in the log */
+ unsigned int cursor; /* next position in the log to write */
+ u32 seqno; /* next sequence number */
+ /* variable length mailbox command log starts here */
+};
+
+/* Given a pointer to a Firmware Mailbox Command Log and a log entry index,
+ * return a pointer to the specified entry.
+ */
+static inline struct mbox_cmd *mbox_cmd_log_entry(struct mbox_cmd_log *log,
+ unsigned int entry_idx)
+{
+ return &((struct mbox_cmd *)&(log)[1])[entry_idx];
+}
+
#include "adapter.h"
#ifndef PCI_VENDOR_ID_CHELSIO
@@ -241,10 +271,17 @@ static inline bool is_10g_port(const struct link_config *lc)
return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
}
+/* Return true if the Link Configuration supports "High Speeds" (those greater
+ * than 1Gb/s).
+ */
static inline bool is_x_10g_port(const struct link_config *lc)
{
- return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
- (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
+ unsigned int speeds, high_speeds;
+
+ speeds = FW_PORT_CAP_SPEED_V(FW_PORT_CAP_SPEED_G(lc->supported));
+ high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G);
+
+ return high_speeds != 0;
}
static inline unsigned int core_ticks_per_usec(const struct adapter *adapter)
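
For reference, the reworked is_x_10g_port() treats the speed capabilities as a plain bit field: extract the speed bits, clear the two low-speed ones, and any bit left over (10G, 25G, 40G, 100G) means the port counts as "high speed". A standalone sketch with hypothetical bit assignments:

	#define EX_SPEED_100M	0x01	/* illustrative values only */
	#define EX_SPEED_1G	0x02
	#define EX_SPEED_10G	0x04
	#define EX_SPEED_40G	0x10

	static bool ex_is_high_speed(unsigned int supported)
	{
		return (supported & ~(EX_SPEED_100M | EX_SPEED_1G)) != 0;
	}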
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index fed83d88fc4e..b5622b1689e9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -76,21 +76,33 @@ static void get_mbox_rpl(struct adapter *adapter, __be64 *rpl, int size,
*rpl++ = cpu_to_be64(t4_read_reg64(adapter, mbox_data));
}
-/*
- * Dump contents of mailbox with a leading tag.
+/**
+ * t4vf_record_mbox - record a Firmware Mailbox Command/Reply in the log
+ * @adapter: the adapter
+ * @cmd: the Firmware Mailbox Command or Reply
+ * @size: command length in bytes
+ * @access: the time (ms) needed to access the Firmware Mailbox
+ * @execute: the time (ms) the command spent being executed
*/
-static void dump_mbox(struct adapter *adapter, const char *tag, u32 mbox_data)
+static void t4vf_record_mbox(struct adapter *adapter, const __be64 *cmd,
+ int size, int access, int execute)
{
- dev_err(adapter->pdev_dev,
- "mbox %s: %llx %llx %llx %llx %llx %llx %llx %llx\n", tag,
- (unsigned long long)t4_read_reg64(adapter, mbox_data + 0),
- (unsigned long long)t4_read_reg64(adapter, mbox_data + 8),
- (unsigned long long)t4_read_reg64(adapter, mbox_data + 16),
- (unsigned long long)t4_read_reg64(adapter, mbox_data + 24),
- (unsigned long long)t4_read_reg64(adapter, mbox_data + 32),
- (unsigned long long)t4_read_reg64(adapter, mbox_data + 40),
- (unsigned long long)t4_read_reg64(adapter, mbox_data + 48),
- (unsigned long long)t4_read_reg64(adapter, mbox_data + 56));
+ struct mbox_cmd_log *log = adapter->mbox_log;
+ struct mbox_cmd *entry;
+ int i;
+
+ entry = mbox_cmd_log_entry(log, log->cursor++);
+ if (log->cursor == log->size)
+ log->cursor = 0;
+
+ for (i = 0; i < size / 8; i++)
+ entry->cmd[i] = be64_to_cpu(cmd[i]);
+ while (i < MBOX_LEN / 8)
+ entry->cmd[i++] = 0;
+ entry->timestamp = jiffies;
+ entry->seqno = log->seqno++;
+ entry->access = access;
+ entry->execute = execute;
}
/**
@@ -120,10 +132,14 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
1, 1, 3, 5, 10, 10, 20, 50, 100
};
+ u16 access = 0, execute = 0;
u32 v, mbox_data;
- int i, ms, delay_idx;
+ int i, ms, delay_idx, ret;
const __be64 *p;
u32 mbox_ctl = T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL;
+ u32 cmd_op = FW_CMD_OP_G(be32_to_cpu(((struct fw_cmd_hdr *)cmd)->hi));
+ __be64 cmd_rpl[MBOX_LEN / 8];
+ struct mbox_list entry;
/* In T6, mailbox size is changed to 128 bytes to avoid
* invalidating the entire prefetch buffer.
@@ -141,6 +157,51 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
size > NUM_CIM_VF_MAILBOX_DATA_INSTANCES * 4)
return -EINVAL;
+ /* Queue ourselves onto the mailbox access list. When our entry is at
+ * the front of the list, we have rights to access the mailbox. So we
+ * wait [for a while] till we're at the front [or bail out with an
+ * EBUSY] ...
+ */
+ spin_lock(&adapter->mbox_lock);
+ list_add_tail(&entry.list, &adapter->mlist.list);
+ spin_unlock(&adapter->mbox_lock);
+
+ delay_idx = 0;
+ ms = delay[0];
+
+ for (i = 0; ; i += ms) {
+ /* If we've waited too long, return a busy indication. This
+ * really ought to be based on our initial position in the
+ * mailbox access list but this is a start. We very rarely
+ * contend on access to the mailbox ...
+ */
+ if (i > FW_CMD_MAX_TIMEOUT) {
+ spin_lock(&adapter->mbox_lock);
+ list_del(&entry.list);
+ spin_unlock(&adapter->mbox_lock);
+ ret = -EBUSY;
+ t4vf_record_mbox(adapter, cmd, size, access, ret);
+ return ret;
+ }
+
+ /* If we're at the head, break out and start the mailbox
+ * protocol.
+ */
+ if (list_first_entry(&adapter->mlist.list, struct mbox_list,
+ list) == &entry)
+ break;
+
+ /* Delay for a bit before checking again ... */
+ if (sleep_ok) {
+ ms = delay[delay_idx]; /* last element may repeat */
+ if (delay_idx < ARRAY_SIZE(delay) - 1)
+ delay_idx++;
+ msleep(ms);
+ } else {
+ mdelay(ms);
+ }
+ }
+
/*
* Loop trying to get ownership of the mailbox. Return an error
* if we can't gain ownership.
@@ -148,8 +209,14 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
v = MBOWNER_G(t4_read_reg(adapter, mbox_ctl));
- if (v != MBOX_OWNER_DRV)
- return v == MBOX_OWNER_FW ? -EBUSY : -ETIMEDOUT;
+ if (v != MBOX_OWNER_DRV) {
+ spin_lock(&adapter->mbox_lock);
+ list_del(&entry.list);
+ spin_unlock(&adapter->mbox_lock);
+ ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
+ t4vf_record_mbox(adapter, cmd, size, access, ret);
+ return ret;
+ }
/*
* Write the command array into the Mailbox Data register array and
@@ -164,6 +231,8 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
* Data registers before doing the write to the VF Mailbox Control
* register.
*/
+ if (cmd_op != FW_VI_STATS_CMD)
+ t4vf_record_mbox(adapter, cmd, size, access, 0);
for (i = 0, p = cmd; i < size; i += 8)
t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
t4_read_reg(adapter, mbox_data); /* flush write */
@@ -209,36 +278,45 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
* We return the (negated) firmware command return
* code (this depends on FW_SUCCESS == 0).
*/
+ get_mbox_rpl(adapter, cmd_rpl, size, mbox_data);
/* return value in low-order little-endian word */
- v = t4_read_reg(adapter, mbox_data);
- if (FW_CMD_RETVAL_G(v))
- dump_mbox(adapter, "FW Error", mbox_data);
+ v = be64_to_cpu(cmd_rpl[0]);
if (rpl) {
/* request bit in high-order BE word */
WARN_ON((be32_to_cpu(*(const __be32 *)cmd)
& FW_CMD_REQUEST_F) == 0);
- get_mbox_rpl(adapter, rpl, size, mbox_data);
+ memcpy(rpl, cmd_rpl, size);
WARN_ON((be32_to_cpu(*(__be32 *)rpl)
& FW_CMD_REQUEST_F) != 0);
}
t4_write_reg(adapter, mbox_ctl,
MBOWNER_V(MBOX_OWNER_NONE));
+ execute = i + ms;
+ if (cmd_op != FW_VI_STATS_CMD)
+ t4vf_record_mbox(adapter, cmd_rpl, size, access,
+ execute);
+ spin_lock(&adapter->mbox_lock);
+ list_del(&entry.list);
+ spin_unlock(&adapter->mbox_lock);
return -FW_CMD_RETVAL_G(v);
}
}
- /*
- * We timed out. Return the error ...
- */
- dump_mbox(adapter, "FW Timeout", mbox_data);
- return -ETIMEDOUT;
+ /* We timed out. Return the error ... */
+ ret = -ETIMEDOUT;
+ t4vf_record_mbox(adapter, cmd, size, access, ret);
+ spin_lock(&adapter->mbox_lock);
+ list_del(&entry.list);
+ spin_unlock(&adapter->mbox_lock);
+ return ret;
}
#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
- FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
- FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
+ FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_25G | \
+ FW_PORT_CAP_SPEED_40G | FW_PORT_CAP_SPEED_100G | \
+ FW_PORT_CAP_ANEG)
/**
* init_link_config - initialize a link's SW state
@@ -251,6 +329,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
static void init_link_config(struct link_config *lc, unsigned int caps)
{
lc->supported = caps;
+ lc->lp_advertising = 0;
lc->requested_speed = 0;
lc->speed = 0;
lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
@@ -1634,8 +1713,12 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
speed = 1000;
else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
speed = 10000;
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
+ speed = 25000;
else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
speed = 40000;
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
+ speed = 100000;
/*
* Scan all of our "ports" (Virtual Interfaces) looking for
@@ -1666,6 +1749,8 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
lc->fc = fc;
lc->supported =
be16_to_cpu(port_cmd->u.info.pcap);
+ lc->lp_advertising =
+ be16_to_cpu(port_cmd->u.info.lpacap);
t4vf_os_link_changed(adapter, pidx, link_ok);
}
}
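
For reference, a minimal sketch of the mailbox serialization scheme added above: each caller queues a stack-allocated list node under a spinlock, then polls until its node reaches the head of the list, which grants FIFO ownership without a dedicated mutex. Names are illustrative, and the timeout/backoff handling of the real code is omitted:

	struct ex_waiter { struct list_head list; };

	static void ex_serialized_op(spinlock_t *lock, struct list_head *q)
	{
		struct ex_waiter me;

		spin_lock(lock);
		list_add_tail(&me.list, q);
		spin_unlock(lock);

		/* The real code bounds this wait and can bail out -EBUSY. */
		while (list_first_entry(q, struct ex_waiter, list) != &me)
			msleep(1);

		/* ... perform the hardware mailbox protocol ... */

		spin_lock(lock);
		list_del(&me.list);
		spin_unlock(lock);
	}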
diff --git a/drivers/net/ethernet/chelsio/libcxgb/Makefile b/drivers/net/ethernet/chelsio/libcxgb/Makefile
new file mode 100644
index 000000000000..2362230ef4fe
--- /dev/null
+++ b/drivers/net/ethernet/chelsio/libcxgb/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_CHELSIO_LIB) += libcxgb.o
+
+libcxgb-y := libcxgb_ppm.o
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
index d88a7a7b2400..0ed161642371 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.c
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
@@ -1,17 +1,44 @@
/*
- * cxgb4_ppm.c: Chelsio common library for T4/T5 iSCSI PagePod Manager
+ * libcxgb_ppm.c: Chelsio common library for T3/T4/T5 iSCSI PagePod Manager
*
* Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*
* Written by: Karen Xie (kxie@chelsio.com)
*/
+#define DRV_NAME "libcxgb"
+#define DRV_VERSION "1.0.0-ko"
+#define pr_fmt(fmt) DRV_NAME ": " fmt
+
#include <linux/kernel.h>
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
@@ -22,7 +49,7 @@
#include <linux/pci.h>
#include <linux/scatterlist.h>
-#include "cxgb4_ppm.h"
+#include "libcxgb_ppm.h"
/* Direct Data Placement -
* Directly place the iSCSI Data-In or Data-Out PDU's payload into
@@ -309,6 +336,7 @@ int cxgbi_ppm_release(struct cxgbi_ppm *ppm)
}
return 1;
}
+EXPORT_SYMBOL(cxgbi_ppm_release);
static struct cxgbi_ppm_pool *ppm_alloc_cpu_pool(unsigned int *total,
unsigned int *pcpu_ppmax)
@@ -462,3 +490,9 @@ unsigned int cxgbi_tagmask_set(unsigned int ppmax)
return 1 << (bits + PPOD_IDX_SHIFT);
}
+EXPORT_SYMBOL(cxgbi_tagmask_set);
+
+MODULE_AUTHOR("Chelsio Communications");
+MODULE_DESCRIPTION("Chelsio common library");
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("Dual BSD/GPL");
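
For reference, carving the PagePod manager out into libcxgb follows the usual recipe for a helper library shared by several drivers: give the object its own Kconfig symbol and Makefile entry (above), EXPORT_SYMBOL() the public entry points, and add module metadata. The skeleton, with illustrative names:

	/* ex_lib.c: helpers shared by several drivers */
	#include <linux/module.h>

	int ex_lib_do_work(void)
	{
		return 0;
	}
	EXPORT_SYMBOL(ex_lib_do_work);

	MODULE_AUTHOR("Example Author");
	MODULE_DESCRIPTION("example shared helper library");
	MODULE_LICENSE("Dual BSD/GPL");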
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.h b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h
index d48732673b75..e995a1a3840a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ppm.h
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.h
@@ -1,17 +1,41 @@
/*
- * cxgb4_ppm.h: Chelsio common library for T4/T5 iSCSI ddp operation
+ * libcxgb_ppm.h: Chelsio common library for T3/T4/T5 iSCSI ddp operation
*
* Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*
* Written by: Karen Xie (kxie@chelsio.com)
*/
-#ifndef __CXGB4PPM_H__
-#define __CXGB4PPM_H__
+#ifndef __LIBCXGB_PPM_H__
+#define __LIBCXGB_PPM_H__
#include <linux/kernel.h>
#include <linux/errno.h>
@@ -307,4 +331,4 @@ int cxgbi_ppm_release(struct cxgbi_ppm *ppm);
void cxgbi_tagmask_check(unsigned int tagmask, struct cxgbi_tag_format *);
unsigned int cxgbi_tagmask_set(unsigned int ppmax);
-#endif /*__CXGB4PPM_H__*/
+#endif /*__LIBCXGB_PPM_H__*/
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index 60383040d6c6..c363b58552e9 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -53,6 +53,8 @@
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -1895,9 +1897,17 @@ static int cs89x0_platform_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id cs89x0_match[] __maybe_unused = {
+ { .compatible = "cirrus,cs8900", },
+ { .compatible = "cirrus,cs8920", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, cs89x0_match);
+
static struct platform_driver cs89x0_driver = {
.driver = {
- .name = DRV_NAME,
+ .name = DRV_NAME,
+ .of_match_table = of_match_ptr(cs89x0_match),
},
.remove = cs89x0_platform_remove,
};
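
For reference, the of_match_ptr() idiom used above evaluates to NULL when CONFIG_OF is disabled, at which point the match table is referenced only by MODULE_DEVICE_TABLE(); tagging it __maybe_unused silences the resulting warning in built-in, non-OF configurations. The pattern for a hypothetical driver:

	static const struct of_device_id ex_match[] __maybe_unused = {
		{ .compatible = "vendor,ex-device", },
		{ /* sentinel */ },
	};
	MODULE_DEVICE_TABLE(of, ex_match);

	static struct platform_driver ex_driver = {
		.driver = {
			.name		= "ex-device",
			.of_match_table	= of_match_ptr(ex_match),
		},
	};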
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index f44a39c40642..fd3980cc1e34 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -103,25 +103,29 @@ static void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
}
}
-static int enic_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+static int enic_get_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *ecmd)
{
struct enic *enic = netdev_priv(netdev);
+ struct ethtool_link_settings *base = &ecmd->base;
- ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
- ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
- ecmd->port = PORT_FIBRE;
- ecmd->transceiver = XCVR_EXTERNAL;
+ ethtool_link_ksettings_add_link_mode(ecmd, supported,
+ 10000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE);
+ ethtool_link_ksettings_add_link_mode(ecmd, advertising,
+ 10000baseT_Full);
+ ethtool_link_ksettings_add_link_mode(ecmd, advertising, FIBRE);
+ base->port = PORT_FIBRE;
if (netif_carrier_ok(netdev)) {
- ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev));
- ecmd->duplex = DUPLEX_FULL;
+ base->speed = vnic_dev_port_speed(enic->vdev);
+ base->duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
- ecmd->duplex = DUPLEX_UNKNOWN;
+ base->speed = SPEED_UNKNOWN;
+ base->duplex = DUPLEX_UNKNOWN;
}
- ecmd->autoneg = AUTONEG_DISABLE;
+ base->autoneg = AUTONEG_DISABLE;
return 0;
}
@@ -500,7 +504,6 @@ static int enic_set_rxfh(struct net_device *netdev, const u32 *indir,
}
static const struct ethtool_ops enic_ethtool_ops = {
- .get_settings = enic_get_settings,
.get_drvinfo = enic_get_drvinfo,
.get_msglevel = enic_get_msglevel,
.set_msglevel = enic_set_msglevel,
@@ -516,6 +519,7 @@ static const struct ethtool_ops enic_ethtool_ops = {
.get_rxfh_key_size = enic_get_rxfh_key_size,
.get_rxfh = enic_get_rxfh,
.set_rxfh = enic_set_rxfh,
+ .get_link_ksettings = enic_get_ksettings,
};
void enic_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index b2182d3ba3cc..48f82ab6c25b 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1566,7 +1566,7 @@ static int enic_request_intr(struct enic *enic)
intr = enic_msix_rq_intr(enic, i);
snprintf(enic->msix[intr].devname,
sizeof(enic->msix[intr].devname),
- "%.11s-rx-%d", netdev->name, i);
+ "%.11s-rx-%u", netdev->name, i);
enic->msix[intr].isr = enic_isr_msix;
enic->msix[intr].devid = &enic->napi[i];
}
@@ -1577,7 +1577,7 @@ static int enic_request_intr(struct enic *enic)
intr = enic_msix_wq_intr(enic, i);
snprintf(enic->msix[intr].devname,
sizeof(enic->msix[intr].devname),
- "%.11s-tx-%d", netdev->name, i);
+ "%.11s-tx-%u", netdev->name, i);
enic->msix[intr].isr = enic_isr_msix;
enic->msix[intr].devid = &enic->napi[wq];
}
@@ -2740,6 +2740,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features |= NETIF_F_RXCSUM;
netdev->features |= netdev->hw_features;
+ netdev->vlan_features |= netdev->features;
#ifdef CONFIG_RFS_ACCEL
netdev->hw_features |= NETIF_F_NTUPLE;
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 48d91941408d..f45385f5c6e5 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -966,7 +966,7 @@ dm9000_init_dm9000(struct net_device *dev)
/* Init Driver variable */
db->tx_pkt_cnt = 0;
db->queue_pkt_len = 0;
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
}
/* Our watchdog timed out. Called by the networking layer */
@@ -985,7 +985,7 @@ static void dm9000_timeout(struct net_device *dev)
dm9000_init_dm9000(dev);
dm9000_unmask_interrupts(db);
/* We can accept TX packets again */
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
/* Restore previous register address */
@@ -1299,6 +1299,7 @@ static int
dm9000_open(struct net_device *dev)
{
struct board_info *db = netdev_priv(dev);
+ unsigned int irq_flags = irq_get_trigger_type(dev->irq);
if (netif_msg_ifup(db))
dev_dbg(db->dev, "enabling %s\n", dev->name);
@@ -1306,9 +1307,11 @@ dm9000_open(struct net_device *dev)
/* If there is no IRQ type specified, tell the user that this is a
* problem
*/
- if (irq_get_trigger_type(dev->irq) == IRQF_TRIGGER_NONE)
+ if (irq_flags == IRQF_TRIGGER_NONE)
dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
+ irq_flags |= IRQF_SHARED;
+
/* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
mdelay(1); /* delay needs by DM9000B */
@@ -1316,8 +1319,7 @@ dm9000_open(struct net_device *dev)
/* Initialize DM9000 board */
dm9000_init_dm9000(dev);
- if (request_irq(dev->irq, dm9000_interrupt, IRQF_SHARED,
- dev->name, dev))
+ if (request_irq(dev->irq, dm9000_interrupt, irq_flags, dev->name, dev))
return -EAGAIN;
/* Now that we have an interrupt handler hooked up we can unmask
* our interrupts
@@ -1432,6 +1434,7 @@ dm9000_probe(struct platform_device *pdev)
int reset_gpios;
enum of_gpio_flags flags;
struct regulator *power;
+ bool inv_mac_addr = false;
power = devm_regulator_get(dev, "vcc");
if (IS_ERR(power)) {
@@ -1686,9 +1689,7 @@ dm9000_probe(struct platform_device *pdev)
}
if (!is_valid_ether_addr(ndev->dev_addr)) {
- dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please "
- "set using ifconfig\n", ndev->name);
-
+ inv_mac_addr = true;
eth_hw_addr_random(ndev);
mac_src = "random";
}
@@ -1697,11 +1698,15 @@ dm9000_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
ret = register_netdev(ndev);
- if (ret == 0)
+ if (ret == 0) {
+ if (inv_mac_addr)
+ dev_warn(db->dev, "%s: Invalid ethernet MAC address. Please set using ip\n",
+ ndev->name);
printk(KERN_INFO "%s: dm9000%c at %p,%p IRQ %d MAC: %pM (%s)\n",
ndev->name, dm9000_type_to_char(db->type),
db->io_addr, db->io_data, ndev->irq,
ndev->dev_addr, mac_src);
+ }
return 0;
out:
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 3acde3b9b767..f0e9e2ef62a0 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -1319,7 +1319,7 @@ de4x5_open(struct net_device *dev)
if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
lp->adapter_name, dev)) {
- printk("de4x5_open(): Requested IRQ%d is busy - attemping FAST/SHARE...", dev->irq);
+ printk("de4x5_open(): Requested IRQ%d is busy - attempting FAST/SHARE...", dev->irq);
if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
lp->adapter_name, dev)) {
printk("\n Cannot get IRQ- reconfigure your hardware.\n");
@@ -1336,7 +1336,7 @@ de4x5_open(struct net_device *dev)
}
lp->interrupt = UNMASK_INTERRUPTS;
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
START_DE4X5;
@@ -1465,7 +1465,7 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
if (!lp->tx_enable) /* Cannot send for now */
- return NETDEV_TX_LOCKED;
+ goto tx_err;
/*
** Clean out the TX ring asynchronously to interrupts - sometimes the
@@ -1478,7 +1478,7 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
/* Test if cache is already locked - requeue skb if so */
if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt)
- return NETDEV_TX_LOCKED;
+ goto tx_err;
/* Transmit descriptor ring full or stale skb */
if (netif_queue_stopped(dev) || (u_long) lp->tx_skb[lp->tx_new] > 1) {
@@ -1519,6 +1519,9 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
lp->cache.lock = 0;
return NETDEV_TX_OK;
+tx_err:
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
}
/*
@@ -1932,7 +1935,7 @@ set_multicast_list(struct net_device *dev)
lp->tx_new = (lp->tx_new + 1) % lp->txRingSize;
outl(POLL_DEMAND, DE4X5_TPD); /* Start the TX */
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
}
}
}
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index afd8e78e024e..8ed0fd8b1dda 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -192,9 +192,6 @@
(__CHK_IO_SIZE(((pci_dev)->device << 16) | (pci_dev)->vendor, \
(pci_dev)->revision))
-/* Sten Check */
-#define DEVICE net_device
-
/* Structure/enum declaration ------------------------------- */
struct tx_desc {
__le32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
@@ -313,10 +310,10 @@ static u8 SF_mode; /* Special Function: 1:VLAN, 2:RX Flow Control
/* function declaration ------------------------------------- */
-static int dmfe_open(struct DEVICE *);
-static netdev_tx_t dmfe_start_xmit(struct sk_buff *, struct DEVICE *);
-static int dmfe_stop(struct DEVICE *);
-static void dmfe_set_filter_mode(struct DEVICE *);
+static int dmfe_open(struct net_device *);
+static netdev_tx_t dmfe_start_xmit(struct sk_buff *, struct net_device *);
+static int dmfe_stop(struct net_device *);
+static void dmfe_set_filter_mode(struct net_device *);
static const struct ethtool_ops netdev_ethtool_ops;
static u16 read_srom_word(void __iomem *, int);
static irqreturn_t dmfe_interrupt(int , void *);
@@ -326,8 +323,8 @@ static void poll_dmfe (struct net_device *dev);
static void dmfe_descriptor_init(struct net_device *);
static void allocate_rx_buffer(struct net_device *);
static void update_cr6(u32, void __iomem *);
-static void send_filter_frame(struct DEVICE *);
-static void dm9132_id_table(struct DEVICE *);
+static void send_filter_frame(struct net_device *);
+static void dm9132_id_table(struct net_device *);
static u16 dmfe_phy_read(void __iomem *, u8, u8, u32);
static void dmfe_phy_write(void __iomem *, u8, u8, u16, u32);
static void dmfe_phy_write_1bit(void __iomem *, u32);
@@ -336,12 +333,12 @@ static u8 dmfe_sense_speed(struct dmfe_board_info *);
static void dmfe_process_mode(struct dmfe_board_info *);
static void dmfe_timer(unsigned long);
static inline u32 cal_CRC(unsigned char *, unsigned int, u8);
-static void dmfe_rx_packet(struct DEVICE *, struct dmfe_board_info *);
-static void dmfe_free_tx_pkt(struct DEVICE *, struct dmfe_board_info *);
+static void dmfe_rx_packet(struct net_device *, struct dmfe_board_info *);
+static void dmfe_free_tx_pkt(struct net_device *, struct dmfe_board_info *);
static void dmfe_reuse_skb(struct dmfe_board_info *, struct sk_buff *);
-static void dmfe_dynamic_reset(struct DEVICE *);
+static void dmfe_dynamic_reset(struct net_device *);
static void dmfe_free_rxbuffer(struct dmfe_board_info *);
-static void dmfe_init_dm910x(struct DEVICE *);
+static void dmfe_init_dm910x(struct net_device *);
static void dmfe_parse_srom(struct dmfe_board_info *);
static void dmfe_program_DM9801(struct dmfe_board_info *, int);
static void dmfe_program_DM9802(struct dmfe_board_info *);
@@ -558,7 +555,7 @@ static void dmfe_remove_one(struct pci_dev *pdev)
* The interface is opened whenever "ifconfig" actives it.
*/
-static int dmfe_open(struct DEVICE *dev)
+static int dmfe_open(struct net_device *dev)
{
struct dmfe_board_info *db = netdev_priv(dev);
const int irq = db->pdev->irq;
@@ -617,7 +614,7 @@ static int dmfe_open(struct DEVICE *dev)
* Enable Tx/Rx machine
*/
-static void dmfe_init_dm910x(struct DEVICE *dev)
+static void dmfe_init_dm910x(struct net_device *dev)
{
struct dmfe_board_info *db = netdev_priv(dev);
void __iomem *ioaddr = db->ioaddr;
@@ -684,7 +681,7 @@ static void dmfe_init_dm910x(struct DEVICE *dev)
*/
static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
- struct DEVICE *dev)
+ struct net_device *dev)
{
struct dmfe_board_info *db = netdev_priv(dev);
void __iomem *ioaddr = db->ioaddr;
@@ -728,7 +725,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
db->tx_packet_cnt++; /* Ready to send */
dw32(DCR1, 0x1); /* Issue Tx polling */
- dev->trans_start = jiffies; /* saved time stamp */
+ netif_trans_update(dev); /* saved time stamp */
} else {
db->tx_queue_cnt++; /* queue TX packet */
dw32(DCR1, 0x1); /* Issue Tx polling */
@@ -754,7 +751,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
* The interface is stopped when it is brought.
*/
-static int dmfe_stop(struct DEVICE *dev)
+static int dmfe_stop(struct net_device *dev)
{
struct dmfe_board_info *db = netdev_priv(dev);
void __iomem *ioaddr = db->ioaddr;
@@ -798,7 +795,7 @@ static int dmfe_stop(struct DEVICE *dev)
static irqreturn_t dmfe_interrupt(int irq, void *dev_id)
{
- struct DEVICE *dev = dev_id;
+ struct net_device *dev = dev_id;
struct dmfe_board_info *db = netdev_priv(dev);
void __iomem *ioaddr = db->ioaddr;
unsigned long flags;
@@ -879,7 +876,7 @@ static void poll_dmfe (struct net_device *dev)
* Free TX resource after TX complete
*/
-static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
+static void dmfe_free_tx_pkt(struct net_device *dev, struct dmfe_board_info *db)
{
struct tx_desc *txptr;
void __iomem *ioaddr = db->ioaddr;
@@ -934,7 +931,7 @@ static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
db->tx_packet_cnt++; /* Ready to send */
db->tx_queue_cnt--;
dw32(DCR1, 0x1); /* Issue Tx polling */
- dev->trans_start = jiffies; /* saved time stamp */
+ netif_trans_update(dev); /* saved time stamp */
}
/* Resource available check */
@@ -961,7 +958,7 @@ static inline u32 cal_CRC(unsigned char * Data, unsigned int Len, u8 flag)
* Receive the come packet and pass to upper layer
*/
-static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
+static void dmfe_rx_packet(struct net_device *dev, struct dmfe_board_info *db)
{
struct rx_desc *rxptr;
struct sk_buff *skb, *newskb;
@@ -1052,7 +1049,7 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
* Set DM910X multicast address
*/
-static void dmfe_set_filter_mode(struct DEVICE * dev)
+static void dmfe_set_filter_mode(struct net_device *dev)
{
struct dmfe_board_info *db = netdev_priv(dev);
unsigned long flags;
@@ -1545,7 +1542,7 @@ static void send_filter_frame(struct net_device *dev)
update_cr6(db->cr6_data | 0x2000, ioaddr);
dw32(DCR1, 0x1); /* Issue Tx polling */
update_cr6(db->cr6_data, ioaddr);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
} else
db->tx_queue_cnt++; /* Put in TX queue */
}
diff --git a/drivers/net/ethernet/dec/tulip/pnic.c b/drivers/net/ethernet/dec/tulip/pnic.c
index 5364563c4378..7bcccf5cac7a 100644
--- a/drivers/net/ethernet/dec/tulip/pnic.c
+++ b/drivers/net/ethernet/dec/tulip/pnic.c
@@ -44,7 +44,7 @@ void pnic_do_nway(struct net_device *dev)
tp->csr6 = new_csr6;
/* Restart Tx */
tulip_restart_rxtx(tp);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
}
}
}
@@ -70,7 +70,7 @@ void pnic_lnk_change(struct net_device *dev, int csr5)
iowrite32(tp->csr6, ioaddr + CSR6);
iowrite32(0x30, ioaddr + CSR12);
iowrite32(0x0201F078, ioaddr + 0xB8); /* Turn on autonegotiation. */
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
}
} else if (ioread32(ioaddr + CSR5) & TPLnkPass) {
if (tulip_media_cap[dev->if_port] & MediaIsMII) {
@@ -147,7 +147,7 @@ void pnic_timer(unsigned long data)
tp->csr6 = new_csr6;
/* Restart Tx */
tulip_restart_rxtx(tp);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
if (tulip_debug > 1)
dev_info(&dev->dev,
"Changing PNIC configuration to %s %s-duplex, CSR6 %08x\n",
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 94d0eebef129..bbde90bc74fe 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -605,7 +605,7 @@ static void tulip_tx_timeout(struct net_device *dev)
out_unlock:
spin_unlock_irqrestore (&tp->lock, flags);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue (dev);
}
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 447d09272ab7..e750b5ddc0fb 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -636,7 +636,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
txptr->tdes0 = cpu_to_le32(0x80000000); /* Set owner bit */
db->tx_packet_cnt++; /* Ready to send */
uw32(DCR1, 0x1); /* Issue Tx polling */
- dev->trans_start = jiffies; /* saved time stamp */
+ netif_trans_update(dev); /* saved time stamp */
}
/* Tx resource check */
@@ -1431,7 +1431,7 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt)
update_cr6(db->cr6_data | 0x2000, ioaddr);
uw32(DCR1, 0x1); /* Issue Tx polling */
update_cr6(db->cr6_data, ioaddr);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
} else
netdev_err(dev, "No Tx resource - Send_filter_frame!\n");
}
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 3c0e4d5c5fef..1f62b9423851 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -966,7 +966,7 @@ static void tx_timeout(struct net_device *dev)
enable_irq(irq);
netif_wake_queue(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
np->stats.tx_errors++;
}
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index f92b6d948398..78f144696d6b 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -706,7 +706,7 @@ rio_tx_timeout (struct net_device *dev)
dev->name, dr32(TxStatus));
rio_free_tx(dev, 0);
dev->if_port = 0;
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
}
static netdev_tx_t
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index a28a2e583f0f..58c6338a839e 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -1011,7 +1011,7 @@ static void tx_timeout(struct net_device *dev)
dev->if_port = 0;
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
dev->stats.tx_errors++;
if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
netif_wake_queue(dev);
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index b69a9eacc531..c3b64cdd0dec 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -173,7 +173,7 @@ static int dnet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
static void dnet_handle_link_change(struct net_device *dev)
{
struct dnet *bp = netdev_priv(dev);
- struct phy_device *phydev = bp->phy_dev;
+ struct phy_device *phydev = dev->phydev;
unsigned long flags;
u32 mode_reg, ctl_reg;
@@ -295,7 +295,6 @@ static int dnet_mii_probe(struct net_device *dev)
bp->link = 0;
bp->speed = 0;
bp->duplex = -1;
- bp->phy_dev = phydev;
return 0;
}
@@ -629,16 +628,16 @@ static int dnet_open(struct net_device *dev)
struct dnet *bp = netdev_priv(dev);
/* if the phy is not yet register, retry later */
- if (!bp->phy_dev)
+ if (!dev->phydev)
return -EAGAIN;
napi_enable(&bp->napi);
dnet_init_hw(bp);
- phy_start_aneg(bp->phy_dev);
+ phy_start_aneg(dev->phydev);
/* schedule a link state check */
- phy_start(bp->phy_dev);
+ phy_start(dev->phydev);
netif_start_queue(dev);
@@ -652,8 +651,8 @@ static int dnet_close(struct net_device *dev)
netif_stop_queue(dev);
napi_disable(&bp->napi);
- if (bp->phy_dev)
- phy_stop(bp->phy_dev);
+ if (dev->phydev)
+ phy_stop(dev->phydev);
dnet_reset_hw(bp);
netif_carrier_off(dev);
@@ -731,32 +730,9 @@ static struct net_device_stats *dnet_get_stats(struct net_device *dev)
return nstat;
}
-static int dnet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct dnet *bp = netdev_priv(dev);
- struct phy_device *phydev = bp->phy_dev;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_ethtool_gset(phydev, cmd);
-}
-
-static int dnet_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct dnet *bp = netdev_priv(dev);
- struct phy_device *phydev = bp->phy_dev;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_ethtool_sset(phydev, cmd);
-}
-
static int dnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct dnet *bp = netdev_priv(dev);
- struct phy_device *phydev = bp->phy_dev;
+ struct phy_device *phydev = dev->phydev;
if (!netif_running(dev))
return -EINVAL;
@@ -776,11 +752,11 @@ static void dnet_get_drvinfo(struct net_device *dev,
}
static const struct ethtool_ops dnet_ethtool_ops = {
- .get_settings = dnet_get_settings,
- .set_settings = dnet_set_settings,
.get_drvinfo = dnet_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static const struct net_device_ops dnet_netdev_ops = {
@@ -875,7 +851,7 @@ static int dnet_probe(struct platform_device *pdev)
(bp->capabilities & DNET_HAS_IRQ) ? "" : "no ",
(bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ",
(bp->capabilities & DNET_HAS_DMA) ? "" : "no ");
- phydev = bp->phy_dev;
+ phydev = dev->phydev;
phy_attached_info(phydev);
return 0;
@@ -899,8 +875,8 @@ static int dnet_remove(struct platform_device *pdev)
if (dev) {
bp = netdev_priv(dev);
- if (bp->phy_dev)
- phy_disconnect(bp->phy_dev);
+ if (dev->phydev)
+ phy_disconnect(dev->phydev);
mdiobus_unregister(bp->mii_bus);
mdiobus_free(bp->mii_bus);
unregister_netdev(dev);
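
For reference, once a driver keeps its PHY pointer in net_device->phydev (as dnet now does), the hand-rolled get/set_settings wrappers collapse into the generic PHY helpers wired directly into ethtool_ops. A minimal sketch for a hypothetical driver:

	static const struct ethtool_ops ex_ethtool_ops = {
		.get_link		= ethtool_op_get_link,
		.get_link_ksettings	= phy_ethtool_get_link_ksettings,
		.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	};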
diff --git a/drivers/net/ethernet/dnet.h b/drivers/net/ethernet/dnet.h
index 37f5b30fa78b..d985080bbd5d 100644
--- a/drivers/net/ethernet/dnet.h
+++ b/drivers/net/ethernet/dnet.h
@@ -216,7 +216,6 @@ struct dnet {
/* PHY stuff */
struct mii_bus *mii_bus;
- struct phy_device *phy_dev;
unsigned int link;
unsigned int speed;
unsigned int duplex;
diff --git a/drivers/net/ethernet/emulex/benet/Kconfig b/drivers/net/ethernet/emulex/benet/Kconfig
index 7108563260ae..b4853ec9de8d 100644
--- a/drivers/net/ethernet/emulex/benet/Kconfig
+++ b/drivers/net/ethernet/emulex/benet/Kconfig
@@ -13,11 +13,3 @@ config BE2NET_HWMON
---help---
Say Y here if you want to expose thermal sensor data on
be2net network adapter.
-
-config BE2NET_VXLAN
- bool "VXLAN offload support on be2net driver"
- default y
- depends on BE2NET && VXLAN && !(BE2NET=y && VXLAN=m)
- ---help---
- Say Y here if you want to enable VXLAN offload support on
- be2net driver.
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index fe3763df3f13..4555e041ef69 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -97,7 +97,8 @@
* SURF/DPDK
*/
-#define MAX_RSS_IFACES 15
+#define MAX_PORT_RSS_TABLES 15
+#define MAX_NIC_FUNCS 16
#define MAX_RX_QS 32
#define MAX_EVT_QS 32
#define MAX_TX_QS 32
@@ -442,8 +443,20 @@ struct be_resources {
u16 max_iface_count;
u16 max_mcc_count;
u16 max_evt_qs;
+ u16 max_nic_evt_qs; /* NIC's share of evt qs */
u32 if_cap_flags;
u32 vf_if_cap_flags; /* VF if capability flags */
+ u32 flags;
+ /* Calculated PF Pool's share of RSS Tables. This is not enforced by
+ * the FW, but is a self-imposed driver limitation.
+ */
+ u16 max_rss_tables;
+};
+
+/* These are port-wide values */
+struct be_port_resources {
+ u16 max_vfs;
+ u16 nic_pfs;
};
#define be_is_os2bmc_enabled(adapter) (adapter->flags & BE_FLAGS_OS2BMC)
@@ -513,7 +526,8 @@ struct be_adapter {
spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */
spinlock_t mcc_cq_lock;
- u16 cfg_num_qs; /* configured via set-channels */
+ u16 cfg_num_rx_irqs; /* configured via set-channels */
+ u16 cfg_num_tx_irqs; /* configured via set-channels */
u16 num_evt_qs;
u16 num_msix_vec;
struct be_eq_obj eq_obj[MAX_EVT_QS];
@@ -632,16 +646,42 @@ struct be_adapter {
#define be_max_txqs(adapter) (adapter->res.max_tx_qs)
#define be_max_prio_txqs(adapter) (adapter->res.max_prio_tx_qs)
#define be_max_rxqs(adapter) (adapter->res.max_rx_qs)
-#define be_max_eqs(adapter) (adapter->res.max_evt_qs)
+/* Max number of EQs available for the function (NIC + RoCE (if enabled)) */
+#define be_max_func_eqs(adapter) (adapter->res.max_evt_qs)
+/* Max number of EQs available only for NIC */
+#define be_max_nic_eqs(adapter) (adapter->res.max_nic_evt_qs)
#define be_if_cap_flags(adapter) (adapter->res.if_cap_flags)
-
-static inline u16 be_max_qs(struct be_adapter *adapter)
+#define be_max_pf_pool_rss_tables(adapter) \
+ (adapter->pool_res.max_rss_tables)
+/* Max irqs available for NIC */
+#define be_max_irqs(adapter) \
+ (min_t(u16, be_max_nic_eqs(adapter), num_online_cpus()))
+
+/* Max irqs *needed* for RX queues */
+static inline u16 be_max_rx_irqs(struct be_adapter *adapter)
{
- /* If no RSS, need atleast the one def RXQ */
+ /* If no RSS, need atleast one irq for def-RXQ */
u16 num = max_t(u16, be_max_rss(adapter), 1);
- num = min(num, be_max_eqs(adapter));
- return min_t(u16, num, num_online_cpus());
+ return min_t(u16, num, be_max_irqs(adapter));
+}
+
+/* Max irqs *needed* for TX queues */
+static inline u16 be_max_tx_irqs(struct be_adapter *adapter)
+{
+ return min_t(u16, be_max_txqs(adapter), be_max_irqs(adapter));
+}
+
+/* Max irqs *needed* for combined queues */
+static inline u16 be_max_qp_irqs(struct be_adapter *adapter)
+{
+ return min(be_max_tx_irqs(adapter), be_max_rx_irqs(adapter));
+}
+
+/* Max irqs *needed* for RX and TX queues together */
+static inline u16 be_max_any_irqs(struct be_adapter *adapter)
+{
+ return max(be_max_tx_irqs(adapter), be_max_rx_irqs(adapter));
}
/* Is BE in pvid_tagging mode */
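
A worked example of the helpers above, with hypothetical resources (max_nic_evt_qs = 12, max_rss = 4, max_tx_qs = 16, 6 online CPUs): be_max_irqs() = min(12, 6) = 6; be_max_rx_irqs() = min(max(4, 1), 6) = 4; be_max_tx_irqs() = min(16, 6) = 6; so be_max_qp_irqs() = min(6, 4) = 4 combined queues and be_max_any_irqs() = max(6, 4) = 6.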
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 22402db275f2..2cc11756859f 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -87,6 +87,11 @@ static struct be_cmd_priv_map cmd_priv_map[] = {
CMD_SUBSYSTEM_LOWLEVEL,
BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
},
+ {
+ OPCODE_COMMON_SET_HSW_CONFIG,
+ CMD_SUBSYSTEM_COMMON,
+ BE_PRIV_DEVCFG | BE_PRIV_VHADM
+ },
};
static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
@@ -3850,6 +3855,10 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
void *ctxt;
int status;
+ if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_HSW_CONFIG,
+ CMD_SUBSYSTEM_COMMON))
+ return -EPERM;
+
spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
@@ -3871,7 +3880,7 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
}
- if (!BEx_chip(adapter) && hsw_mode) {
+ if (hsw_mode) {
AMAP_SET_BITS(struct amap_set_hsw_context, interface_id,
ctxt, adapter->hba_port_num);
AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1);
@@ -4023,7 +4032,10 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *)cmd.va;
adapter->wol_cap = resp->wol_settings;
- if (adapter->wol_cap & BE_WOL_CAP)
+
+ /* Non-zero macaddr indicates WOL is enabled */
+ if (adapter->wol_cap & BE_WOL_CAP &&
+ !is_zero_ether_addr(resp->magic_mac))
adapter->wol_en = true;
}
err:
@@ -4360,9 +4372,35 @@ err:
return status;
}
+/* This routine returns a list of all the NIC PF_nums in the adapter */
+u16 be_get_nic_pf_num_list(u8 *buf, u32 desc_count, u16 *nic_pf_nums)
+{
+ struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
+ struct be_pcie_res_desc *pcie = NULL;
+ int i;
+ u16 nic_pf_count = 0;
+
+ for (i = 0; i < desc_count; i++) {
+ if (hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
+ hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1) {
+ pcie = (struct be_pcie_res_desc *)hdr;
+ if (pcie->pf_state && (pcie->pf_type == MISSION_NIC ||
+ pcie->pf_type == MISSION_RDMA)) {
+ nic_pf_nums[nic_pf_count++] = pcie->pf_num;
+ }
+ }
+
+ hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
+ hdr = (void *)hdr + hdr->desc_len;
+ }
+ return nic_pf_count;
+}
+
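One subtlety in the walk above: `hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;` uses the GNU `?:` shorthand to guard against firmware that reports a zero-length descriptor, which would otherwise pin the loop on the same descriptor forever. Expanded, the step is equivalent to this sketch:

	if (!hdr->desc_len)			/* old FW may report length 0 */
		hdr->desc_len = RESOURCE_DESC_SIZE_V0;
	hdr = (void *)hdr + hdr->desc_len;	/* advance to the next descriptor */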
/* Will use MBOX only if MCCQ has not been created */
int be_cmd_get_profile_config(struct be_adapter *adapter,
- struct be_resources *res, u8 query, u8 domain)
+ struct be_resources *res,
+ struct be_port_resources *port_res,
+ u8 profile_type, u8 query, u8 domain)
{
struct be_cmd_resp_get_profile_config *resp;
struct be_cmd_req_get_profile_config *req;
@@ -4389,7 +4427,7 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
if (!lancer_chip(adapter))
req->hdr.version = 1;
- req->type = ACTIVE_PROFILE_TYPE;
+ req->type = profile_type;
req->hdr.domain = domain;
/* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the
@@ -4406,6 +4444,28 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
resp = cmd.va;
desc_count = le16_to_cpu(resp->desc_count);
+ if (port_res) {
+ u16 nic_pf_cnt = 0, i;
+ u16 nic_pf_num_list[MAX_NIC_FUNCS];
+
+ nic_pf_cnt = be_get_nic_pf_num_list(resp->func_param,
+ desc_count,
+ nic_pf_num_list);
+
+ for (i = 0; i < nic_pf_cnt; i++) {
+ nic = be_get_func_nic_desc(resp->func_param, desc_count,
+ nic_pf_num_list[i]);
+ if (nic->link_param == adapter->port_num) {
+ port_res->nic_pfs++;
+ pcie = be_get_pcie_desc(resp->func_param,
+ desc_count,
+ nic_pf_num_list[i]);
+ port_res->max_vfs += le16_to_cpu(pcie->num_vfs);
+ }
+ }
+ return status;
+ }
+
pcie = be_get_pcie_desc(resp->func_param, desc_count,
adapter->pf_num);
if (pcie)
@@ -4465,7 +4525,7 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
}
/* Mark all fields invalid */
-static void be_reset_nic_desc(struct be_nic_res_desc *nic)
+void be_reset_nic_desc(struct be_nic_res_desc *nic)
{
memset(nic, 0, sizeof(*nic));
nic->unicast_mac_count = 0xFFFF;
@@ -4534,73 +4594,9 @@ int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
1, version, domain);
}
-static void be_fill_vf_res_template(struct be_adapter *adapter,
- struct be_resources pool_res,
- u16 num_vfs, u16 num_vf_qs,
- struct be_nic_res_desc *nic_vft)
-{
- u32 vf_if_cap_flags = pool_res.vf_if_cap_flags;
- struct be_resources res_mod = {0};
-
- /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
- * which are modifiable using SET_PROFILE_CONFIG cmd.
- */
- be_cmd_get_profile_config(adapter, &res_mod, RESOURCE_MODIFIABLE, 0);
-
- /* If RSS IFACE capability flags are modifiable for a VF, set the
- * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
- * more than 1 RSSQ is available for a VF.
- * Otherwise, provision only 1 queue pair for VF.
- */
- if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
- nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
- if (num_vf_qs > 1) {
- vf_if_cap_flags |= BE_IF_FLAGS_RSS;
- if (pool_res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
- vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
- } else {
- vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
- BE_IF_FLAGS_DEFQ_RSS);
- }
- } else {
- num_vf_qs = 1;
- }
-
- if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
- nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
- vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
- }
-
- nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags);
- nic_vft->rq_count = cpu_to_le16(num_vf_qs);
- nic_vft->txq_count = cpu_to_le16(num_vf_qs);
- nic_vft->rssq_count = cpu_to_le16(num_vf_qs);
- nic_vft->cq_count = cpu_to_le16(pool_res.max_cq_count /
- (num_vfs + 1));
-
- /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
- * among the PF and it's VFs, if the fields are changeable
- */
- if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
- nic_vft->unicast_mac_count = cpu_to_le16(pool_res.max_uc_mac /
- (num_vfs + 1));
-
- if (res_mod.max_vlans == FIELD_MODIFIABLE)
- nic_vft->vlan_count = cpu_to_le16(pool_res.max_vlans /
- (num_vfs + 1));
-
- if (res_mod.max_iface_count == FIELD_MODIFIABLE)
- nic_vft->iface_count = cpu_to_le16(pool_res.max_iface_count /
- (num_vfs + 1));
-
- if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
- nic_vft->mcc_count = cpu_to_le16(pool_res.max_mcc_count /
- (num_vfs + 1));
-}
-
int be_cmd_set_sriov_config(struct be_adapter *adapter,
struct be_resources pool_res, u16 num_vfs,
- u16 num_vf_qs)
+ struct be_resources *vft_res)
{
struct {
struct be_pcie_res_desc pcie;
@@ -4620,12 +4616,26 @@ int be_cmd_set_sriov_config(struct be_adapter *adapter,
be_reset_nic_desc(&desc.nic_vft);
desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
- desc.nic_vft.flags = BIT(VFT_SHIFT) | BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
+ desc.nic_vft.flags = vft_res->flags | BIT(VFT_SHIFT) |
+ BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
desc.nic_vft.pf_num = adapter->pdev->devfn;
desc.nic_vft.vf_num = 0;
-
- be_fill_vf_res_template(adapter, pool_res, num_vfs, num_vf_qs,
- &desc.nic_vft);
+ desc.nic_vft.cap_flags = cpu_to_le32(vft_res->vf_if_cap_flags);
+ desc.nic_vft.rq_count = cpu_to_le16(vft_res->max_rx_qs);
+ desc.nic_vft.txq_count = cpu_to_le16(vft_res->max_tx_qs);
+ desc.nic_vft.rssq_count = cpu_to_le16(vft_res->max_rss_qs);
+ desc.nic_vft.cq_count = cpu_to_le16(vft_res->max_cq_count);
+
+ if (vft_res->max_uc_mac)
+ desc.nic_vft.unicast_mac_count =
+ cpu_to_le16(vft_res->max_uc_mac);
+ if (vft_res->max_vlans)
+ desc.nic_vft.vlan_count = cpu_to_le16(vft_res->max_vlans);
+ if (vft_res->max_iface_count)
+ desc.nic_vft.iface_count =
+ cpu_to_le16(vft_res->max_iface_count);
+ if (vft_res->max_mcc_count)
+ desc.nic_vft.mcc_count = cpu_to_le16(vft_res->max_mcc_count);
return be_cmd_set_profile_config(adapter, &desc,
2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index d8540ae95e5a..0d6be224a787 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -1556,7 +1556,9 @@ struct be_cmd_resp_acpi_wol_magic_config_v1 {
u8 rsvd0[2];
u8 wol_settings;
u8 rsvd1[5];
- u32 rsvd2[295];
+ u32 rsvd2[288];
+ u8 magic_mac[6];
+ u8 rsvd3[22];
} __packed;
#define BE_GET_WOL_CAP 2
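The reworked tail of the response is size-neutral: the old pad occupied 295 * 4 = 1180 bytes, and the new layout covers the same span with 288 * 4 + 6 + 22 = 1180 bytes, so the magic_mac field is simply carved out of the reserved area without changing the command's wire format.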
@@ -2128,6 +2130,9 @@ struct be_cmd_req_set_ext_fat_caps {
#define IMM_SHIFT 6 /* Immediate */
#define NOSV_SHIFT 7 /* No save */
+#define MISSION_NIC 1
+#define MISSION_RDMA 8
+
struct be_res_desc_hdr {
u8 desc_type;
u8 desc_len;
@@ -2244,6 +2249,7 @@ struct be_cmd_req_get_profile_config {
struct be_cmd_req_hdr hdr;
u8 rsvd;
#define ACTIVE_PROFILE_TYPE 0x2
+#define SAVED_PROFILE_TYPE 0x0
#define QUERY_MODIFIABLE_FIELDS_TYPE BIT(3)
u8 type;
u16 rsvd1;
@@ -2449,7 +2455,9 @@ int be_cmd_query_port_name(struct be_adapter *adapter);
int be_cmd_get_func_config(struct be_adapter *adapter,
struct be_resources *res);
int be_cmd_get_profile_config(struct be_adapter *adapter,
- struct be_resources *res, u8 query, u8 domain);
+ struct be_resources *res,
+ struct be_port_resources *port_res,
+ u8 profile_type, u8 query, u8 domain);
int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile);
int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
int vf_num);
@@ -2461,4 +2469,4 @@ int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port);
int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op);
int be_cmd_set_sriov_config(struct be_adapter *adapter,
struct be_resources res, u16 num_vfs,
- u16 num_vf_qs);
+ struct be_resources *vft_res);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 2ff691636dac..50e7be5da50c 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -793,6 +793,11 @@ static void be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ struct device *dev = &adapter->pdev->dev;
+ struct be_dma_mem cmd;
+ u8 mac[ETH_ALEN];
+ bool enable;
+ int status;
if (wol->wolopts & ~WAKE_MAGIC)
return -EOPNOTSUPP;
@@ -802,12 +807,32 @@ static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return -EOPNOTSUPP;
}
- if (wol->wolopts & WAKE_MAGIC)
- adapter->wol_en = true;
- else
- adapter->wol_en = false;
+ cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
+ cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
+ if (!cmd.va)
+ return -ENOMEM;
- return 0;
+ eth_zero_addr(mac);
+
+ enable = wol->wolopts & WAKE_MAGIC;
+ if (enable)
+ ether_addr_copy(mac, adapter->netdev->dev_addr);
+
+ status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
+ if (status) {
+ dev_err(dev, "Could not set Wake-on-lan mac address\n");
+ status = be_cmd_status(status);
+ goto err;
+ }
+
+ pci_enable_wake(adapter->pdev, PCI_D3hot, enable);
+ pci_enable_wake(adapter->pdev, PCI_D3cold, enable);
+
+ adapter->wol_en = enable;
+
+err:
+ dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma);
+ return status;
}
static int be_test_ddr_dma(struct be_adapter *adapter)
@@ -1171,9 +1196,17 @@ static void be_get_channels(struct net_device *netdev,
struct ethtool_channels *ch)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ u16 num_rx_irqs = max_t(u16, adapter->num_rss_qs, 1);
- ch->combined_count = adapter->num_evt_qs;
- ch->max_combined = be_max_qs(adapter);
+ /* num_tx_qs is always the same as the number of irqs used for TX */
+ ch->combined_count = min(adapter->num_tx_qs, num_rx_irqs);
+ ch->rx_count = num_rx_irqs - ch->combined_count;
+ ch->tx_count = adapter->num_tx_qs - ch->combined_count;
+
+ ch->max_combined = be_max_qp_irqs(adapter);
+ /* The user must create at least one combined channel */
+ ch->max_rx = be_max_rx_irqs(adapter) - 1;
+ ch->max_tx = be_max_tx_irqs(adapter) - 1;
}
static int be_set_channels(struct net_device *netdev,
@@ -1182,11 +1215,22 @@ static int be_set_channels(struct net_device *netdev,
struct be_adapter *adapter = netdev_priv(netdev);
int status;
- if (ch->rx_count || ch->tx_count || ch->other_count ||
- !ch->combined_count || ch->combined_count > be_max_qs(adapter))
+ /* We support either combined channels only, or a combination of
+ * combined channels and either RX-only or TX-only channels.
+ */
+ if (ch->other_count || !ch->combined_count ||
+ (ch->rx_count && ch->tx_count))
+ return -EINVAL;
+
+ if (ch->combined_count > be_max_qp_irqs(adapter) ||
+ (ch->rx_count &&
+ (ch->rx_count + ch->combined_count) > be_max_rx_irqs(adapter)) ||
+ (ch->tx_count &&
+ (ch->tx_count + ch->combined_count) > be_max_tx_irqs(adapter)))
return -EINVAL;
- adapter->cfg_num_qs = ch->combined_count;
+ adapter->cfg_num_rx_irqs = ch->combined_count + ch->rx_count;
+ adapter->cfg_num_tx_irqs = ch->combined_count + ch->tx_count;
status = be_update_queues(adapter);
return be_cmd_status(status);
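The net effect is that a request must be "combined, plus at most one extra kind": e.g. `ethtool -L eth0 combined 4 rx 2` can succeed, while mixing RX-only and TX-only extras cannot. A worked sketch of the accepted shapes, with invented limits (max_qp = 8, max_rx = 10, max_tx = 9):

	combined=4            -> OK
	combined=4 rx=2       -> OK      (4 + 2 <= 10)
	combined=4 tx=2       -> OK      (4 + 2 <= 9)
	combined=4 rx=2 tx=2  -> -EINVAL (RX-only and TX-only can't be mixed)
	combined=0 rx=4       -> -EINVAL (at least one combined channel needed)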
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 536686476369..874c7539a79d 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -2620,8 +2620,10 @@ static int be_evt_queues_create(struct be_adapter *adapter)
struct be_aic_obj *aic;
int i, rc;
+ /* need enough EQs to service both RX and TX queues */
adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
- adapter->cfg_num_qs);
+ max(adapter->cfg_num_rx_irqs,
+ adapter->cfg_num_tx_irqs));
for_all_evt_queues(adapter, eqo, i) {
int numa_node = dev_to_node(&adapter->pdev->dev);
@@ -2726,7 +2728,7 @@ static int be_tx_qs_create(struct be_adapter *adapter)
struct be_eq_obj *eqo;
int status, i;
- adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
+ adapter->num_tx_qs = min(adapter->num_evt_qs, adapter->cfg_num_tx_irqs);
for_all_tx_queues(adapter, txo, i) {
cq = &txo->cq;
@@ -2784,11 +2786,11 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
struct be_rx_obj *rxo;
int rc, i;
- /* We can create as many RSS rings as there are EQs. */
- adapter->num_rss_qs = adapter->num_evt_qs;
+ adapter->num_rss_qs =
+ min(adapter->num_evt_qs, adapter->cfg_num_rx_irqs);
/* We'll use RSS only if atleast 2 RSS rings are supported. */
- if (adapter->num_rss_qs <= 1)
+ if (adapter->num_rss_qs < 2)
adapter->num_rss_qs = 0;
adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;
@@ -3249,18 +3251,23 @@ static void be_msix_disable(struct be_adapter *adapter)
static int be_msix_enable(struct be_adapter *adapter)
{
- int i, num_vec;
+ unsigned int i, max_roce_eqs;
struct device *dev = &adapter->pdev->dev;
+ int num_vec;
- /* If RoCE is supported, program the max number of NIC vectors that
- * may be configured via set-channels, along with vectors needed for
- * RoCe. Else, just program the number we'll use initially.
+ /* If RoCE is supported, program the max number of vectors that
+ * could be used for NIC and RoCE; otherwise, just program the
+ * number we'll use initially.
*/
- if (be_roce_supported(adapter))
- num_vec = min_t(int, 2 * be_max_eqs(adapter),
- 2 * num_online_cpus());
- else
- num_vec = adapter->cfg_num_qs;
+ if (be_roce_supported(adapter)) {
+ max_roce_eqs =
+ be_max_func_eqs(adapter) - be_max_nic_eqs(adapter);
+ max_roce_eqs = min(max_roce_eqs, num_online_cpus());
+ num_vec = be_max_any_irqs(adapter) + max_roce_eqs;
+ } else {
+ num_vec = max(adapter->cfg_num_rx_irqs,
+ adapter->cfg_num_tx_irqs);
+ }
for (i = 0; i < num_vec; i++)
adapter->msix_entries[i].entry = i;
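For example, with invented limits be_max_func_eqs() = 16, be_max_nic_eqs() = 8 and 4 online CPUs: max_roce_eqs = min(16 - 8, 4) = 4, so the driver asks for be_max_any_irqs() + 4 vectors. The RoCE share is now computed explicitly from the EQ split instead of the old heuristic of doubling the NIC vector count.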
@@ -3625,10 +3632,8 @@ static int be_open(struct net_device *netdev)
be_link_status_update(adapter, link_status);
netif_tx_start_all_queues(netdev);
-#ifdef CONFIG_BE2NET_VXLAN
if (skyhawk_chip(adapter))
- vxlan_get_rx_port(netdev);
-#endif
+ udp_tunnel_get_rx_info(netdev);
return 0;
err:
@@ -3636,40 +3641,6 @@ err:
return -EIO;
}
-static int be_setup_wol(struct be_adapter *adapter, bool enable)
-{
- struct device *dev = &adapter->pdev->dev;
- struct be_dma_mem cmd;
- u8 mac[ETH_ALEN];
- int status;
-
- eth_zero_addr(mac);
-
- cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
- cmd.va = dma_zalloc_coherent(dev, cmd.size, &cmd.dma, GFP_KERNEL);
- if (!cmd.va)
- return -ENOMEM;
-
- if (enable) {
- status = pci_write_config_dword(adapter->pdev,
- PCICFG_PM_CONTROL_OFFSET,
- PCICFG_PM_CONTROL_MASK);
- if (status) {
- dev_err(dev, "Could not enable Wake-on-lan\n");
- goto err;
- }
- } else {
- ether_addr_copy(mac, adapter->netdev->dev_addr);
- }
-
- status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
- pci_enable_wake(adapter->pdev, PCI_D3hot, enable);
- pci_enable_wake(adapter->pdev, PCI_D3cold, enable);
-err:
- dma_free_coherent(dev, cmd.size, cmd.va, cmd.dma);
- return status;
-}
-
static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
{
u32 addr;
@@ -3759,6 +3730,11 @@ static void be_vf_clear(struct be_adapter *adapter)
be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
}
+
+ if (BE3_chip(adapter))
+ be_cmd_set_hsw_config(adapter, 0, 0,
+ adapter->if_handle,
+ PORT_FWD_TYPE_PASSTHRU, 0);
done:
kfree(adapter->vf_cfg);
adapter->num_vfs = 0;
@@ -3789,7 +3765,6 @@ static void be_cancel_err_detection(struct be_adapter *adapter)
}
}
-#ifdef CONFIG_BE2NET_VXLAN
static void be_disable_vxlan_offloads(struct be_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
@@ -3808,37 +3783,87 @@ static void be_disable_vxlan_offloads(struct be_adapter *adapter)
netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
}
-#endif
-static u16 be_calculate_vf_qs(struct be_adapter *adapter, u16 num_vfs)
+static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
+ struct be_resources *vft_res)
{
struct be_resources res = adapter->pool_res;
+ u32 vf_if_cap_flags = res.vf_if_cap_flags;
+ struct be_resources res_mod = {0};
u16 num_vf_qs = 1;
- /* Distribute the queue resources among the PF and it's VFs
- * Do not distribute queue resources in multi-channel configuration.
- */
- if (num_vfs && !be_is_mc(adapter)) {
- /* Divide the qpairs evenly among the VFs and the PF, capped
- * at VF-EQ-count. Any remainder qpairs belong to the PF.
- */
+ /* Distribute the queue resources among the PF and its VFs */
+ if (num_vfs) {
+ /* Divide the rx queues evenly among the VFs and the PF, capped
+ * at VF-EQ-count. Any remainder queues belong to the PF.
+ */
num_vf_qs = min(SH_VF_MAX_NIC_EQS,
res.max_rss_qs / (num_vfs + 1));
- /* Skyhawk-R chip supports only MAX_RSS_IFACES RSS capable
- * interfaces per port. Provide RSS on VFs, only if number
- * of VFs requested is less than MAX_RSS_IFACES limit.
+ /* The Skyhawk-R chip supports only MAX_PORT_RSS_TABLES
+ * RSS Tables per port. Provide RSS on VFs only if the number of
+ * VFs requested is less than its PF Pool's RSS Tables limit.
*/
- if (num_vfs >= MAX_RSS_IFACES)
+ if (num_vfs >= be_max_pf_pool_rss_tables(adapter))
num_vf_qs = 1;
}
- return num_vf_qs;
+
+ /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
+ * which are modifiable using SET_PROFILE_CONFIG cmd.
+ */
+ be_cmd_get_profile_config(adapter, &res_mod, NULL, ACTIVE_PROFILE_TYPE,
+ RESOURCE_MODIFIABLE, 0);
+
+ /* If RSS IFACE capability flags are modifiable for a VF, set the
+ * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
+ * more than 1 RSSQ is available for a VF.
+ * Otherwise, provision only 1 queue pair for VF.
+ */
+ if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
+ vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
+ if (num_vf_qs > 1) {
+ vf_if_cap_flags |= BE_IF_FLAGS_RSS;
+ if (res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
+ vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
+ } else {
+ vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
+ BE_IF_FLAGS_DEFQ_RSS);
+ }
+ } else {
+ num_vf_qs = 1;
+ }
+
+ if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
+ vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
+ vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
+ }
+
+ vft_res->vf_if_cap_flags = vf_if_cap_flags;
+ vft_res->max_rx_qs = num_vf_qs;
+ vft_res->max_rss_qs = num_vf_qs;
+ vft_res->max_tx_qs = res.max_tx_qs / (num_vfs + 1);
+ vft_res->max_cq_count = res.max_cq_count / (num_vfs + 1);
+
+ /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
+ * among the PF and its VFs, if the fields are changeable
+ */
+ if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
+ vft_res->max_uc_mac = res.max_uc_mac / (num_vfs + 1);
+
+ if (res_mod.max_vlans == FIELD_MODIFIABLE)
+ vft_res->max_vlans = res.max_vlans / (num_vfs + 1);
+
+ if (res_mod.max_iface_count == FIELD_MODIFIABLE)
+ vft_res->max_iface_count = res.max_iface_count / (num_vfs + 1);
+
+ if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
+ vft_res->max_mcc_count = res.max_mcc_count / (num_vfs + 1);
}
static int be_clear(struct be_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
- u16 num_vf_qs;
+ struct be_resources vft_res = {0};
be_cancel_worker(adapter);
@@ -3850,16 +3875,15 @@ static int be_clear(struct be_adapter *adapter)
*/
if (skyhawk_chip(adapter) && be_physfn(adapter) &&
!pci_vfs_assigned(pdev)) {
- num_vf_qs = be_calculate_vf_qs(adapter,
- pci_sriov_get_totalvfs(pdev));
+ be_calculate_vf_res(adapter,
+ pci_sriov_get_totalvfs(pdev),
+ &vft_res);
be_cmd_set_sriov_config(adapter, adapter->pool_res,
pci_sriov_get_totalvfs(pdev),
- num_vf_qs);
+ &vft_res);
}
-#ifdef CONFIG_BE2NET_VXLAN
be_disable_vxlan_offloads(adapter);
-#endif
kfree(adapter->pmac_id);
adapter->pmac_id = NULL;
@@ -3884,7 +3908,8 @@ static int be_vfs_if_create(struct be_adapter *adapter)
for_all_vfs(adapter, vf_cfg, vf) {
if (!BE3_chip(adapter)) {
- status = be_cmd_get_profile_config(adapter, &res,
+ status = be_cmd_get_profile_config(adapter, &res, NULL,
+ ACTIVE_PROFILE_TYPE,
RESOURCE_LIMITS,
vf + 1);
if (!status) {
@@ -4000,6 +4025,15 @@ static int be_vf_setup(struct be_adapter *adapter)
}
}
+ if (BE3_chip(adapter)) {
+ /* On BE3, enable VEB only when SRIOV is enabled */
+ status = be_cmd_set_hsw_config(adapter, 0, 0,
+ adapter->if_handle,
+ PORT_FWD_TYPE_VEB, 0);
+ if (status)
+ goto err;
+ }
+
adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
return 0;
err:
@@ -4069,8 +4103,9 @@ static void BEx_get_resources(struct be_adapter *adapter,
/* On a SuperNIC profile, the driver needs to use the
* GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
*/
- be_cmd_get_profile_config(adapter, &super_nic_res,
- RESOURCE_LIMITS, 0);
+ be_cmd_get_profile_config(adapter, &super_nic_res, NULL,
+ ACTIVE_PROFILE_TYPE, RESOURCE_LIMITS,
+ 0);
/* Some old versions of BE3 FW don't report max_tx_qs value */
res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
} else {
@@ -4109,12 +4144,38 @@ static void be_setup_init(struct be_adapter *adapter)
adapter->cmd_privileges = MIN_PRIVILEGES;
}
+/* HW supports only MAX_PORT_RSS_TABLES RSS Policy Tables per port.
+ * However, this HW limitation is not exposed to the host via any SLI cmd.
+ * As a result, in the case of SRIOV and in particular multi-partition configs
+ * the driver needs to calculate a proportional share of RSS Tables per PF-pool
+ * for distribution among the VFs. This self-imposed limit determines the
+ * number of VFs for which RSS can be enabled.
+ */
+void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
+{
+ struct be_port_resources port_res = {0};
+ u8 rss_tables_on_port;
+ u16 max_vfs = be_max_vfs(adapter);
+
+ be_cmd_get_profile_config(adapter, NULL, &port_res, SAVED_PROFILE_TYPE,
+ RESOURCE_LIMITS, 0);
+
+ rss_tables_on_port = MAX_PORT_RSS_TABLES - port_res.nic_pfs;
+
+ /* Each PF Pool's RSS Tables limit =
+ * PF's Max VFs / Total_Max_VFs on Port * RSS Tables on Port
+ */
+ adapter->pool_res.max_rss_tables =
+ max_vfs * rss_tables_on_port / port_res.max_vfs;
+}
+
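Worked example with invented numbers: if MAX_PORT_RSS_TABLES = 15 and two NIC PFs sit on the port, rss_tables_on_port = 13; a PF whose pool allows 32 of the port's 64 total VFs then gets 32 * 13 / 64 = 6 RSS Tables, i.e. RSS can be provisioned for at most 6 of its VFs.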
static int be_get_sriov_config(struct be_adapter *adapter)
{
struct be_resources res = {0};
int max_vfs, old_vfs;
- be_cmd_get_profile_config(adapter, &res, RESOURCE_LIMITS, 0);
+ be_cmd_get_profile_config(adapter, &res, NULL, ACTIVE_PROFILE_TYPE,
+ RESOURCE_LIMITS, 0);
/* Some old versions of BE3 FW don't report max_vfs value */
if (BE3_chip(adapter) && !res.max_vfs) {
@@ -4138,13 +4199,19 @@ static int be_get_sriov_config(struct be_adapter *adapter)
adapter->num_vfs = old_vfs;
}
+ if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
+ be_calculate_pf_pool_rss_tables(adapter);
+ dev_info(&adapter->pdev->dev,
+ "RSS can be enabled for all VFs if num_vfs <= %d\n",
+ be_max_pf_pool_rss_tables(adapter));
+ }
return 0;
}
static void be_alloc_sriov_res(struct be_adapter *adapter)
{
int old_vfs = pci_num_vf(adapter->pdev);
- u16 num_vf_qs;
+ struct be_resources vft_res = {0};
int status;
be_get_sriov_config(adapter);
@@ -4158,9 +4225,9 @@ static void be_alloc_sriov_res(struct be_adapter *adapter)
* Also, this is done by FW in Lancer chip.
*/
if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
- num_vf_qs = be_calculate_vf_qs(adapter, 0);
+ be_calculate_vf_res(adapter, 0, &vft_res);
status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
- num_vf_qs);
+ &vft_res);
if (status)
dev_err(&adapter->pdev->dev,
"Failed to optimize SRIOV resources\n");
@@ -4173,16 +4240,13 @@ static int be_get_resources(struct be_adapter *adapter)
struct be_resources res = {0};
int status;
- if (BEx_chip(adapter)) {
- BEx_get_resources(adapter, &res);
- adapter->res = res;
- }
-
/* For Lancer, SH etc read per-function resource limits from FW.
* GET_FUNC_CONFIG returns per function guaranteed limits.
* GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
*/
- if (!BEx_chip(adapter)) {
+ if (BEx_chip(adapter)) {
+ BEx_get_resources(adapter, &res);
+ } else {
status = be_cmd_get_func_config(adapter, &res);
if (status)
return status;
@@ -4191,13 +4255,13 @@ static int be_get_resources(struct be_adapter *adapter)
if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
!(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
res.max_rss_qs -= 1;
-
- /* If RoCE may be enabled stash away half the EQs for RoCE */
- if (be_roce_supported(adapter))
- res.max_evt_qs /= 2;
- adapter->res = res;
}
+ /* If RoCE is supported stash away half the EQs for RoCE */
+ res.max_nic_evt_qs = be_roce_supported(adapter) ?
+ res.max_evt_qs / 2 : res.max_evt_qs;
+ adapter->res = res;
+
/* If FW supports RSS default queue, then skip creating non-RSS
* queue for non-IP traffic.
*/
@@ -4206,15 +4270,17 @@ static int be_get_resources(struct be_adapter *adapter)
dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
be_max_txqs(adapter), be_max_rxqs(adapter),
- be_max_rss(adapter), be_max_eqs(adapter),
+ be_max_rss(adapter), be_max_nic_eqs(adapter),
be_max_vfs(adapter));
dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
be_max_uc(adapter), be_max_mc(adapter),
be_max_vlans(adapter));
- /* Sanitize cfg_num_qs based on HW and platform limits */
- adapter->cfg_num_qs = min_t(u16, netif_get_num_default_rss_queues(),
- be_max_qs(adapter));
+ /* Ensure RX and TX queues are created in pairs at init time */
+ adapter->cfg_num_rx_irqs =
+ min_t(u16, netif_get_num_default_rss_queues(),
+ be_max_qp_irqs(adapter));
+ adapter->cfg_num_tx_irqs = adapter->cfg_num_rx_irqs;
return 0;
}
@@ -4241,6 +4307,8 @@ static int be_get_config(struct be_adapter *adapter)
}
be_cmd_get_acpi_wol_cap(adapter);
+ pci_enable_wake(adapter->pdev, PCI_D3hot, adapter->wol_en);
+ pci_enable_wake(adapter->pdev, PCI_D3cold, adapter->wol_en);
be_cmd_query_port_name(adapter);
@@ -4251,15 +4319,6 @@ static int be_get_config(struct be_adapter *adapter)
"Using profile 0x%x\n", profile_id);
}
- status = be_get_resources(adapter);
- if (status)
- return status;
-
- adapter->pmac_id = kcalloc(be_max_uc(adapter),
- sizeof(*adapter->pmac_id), GFP_KERNEL);
- if (!adapter->pmac_id)
- return -ENOMEM;
-
return 0;
}
@@ -4334,7 +4393,7 @@ static int be_if_create(struct be_adapter *adapter)
u32 cap_flags = be_if_cap_flags(adapter);
int status;
- if (adapter->cfg_num_qs == 1)
+ if (adapter->cfg_num_rx_irqs == 1)
cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS);
en_flags &= cap_flags;
@@ -4460,13 +4519,22 @@ static int be_setup(struct be_adapter *adapter)
return status;
}
+ status = be_get_config(adapter);
+ if (status)
+ goto err;
+
if (!BE2_chip(adapter) && be_physfn(adapter))
be_alloc_sriov_res(adapter);
- status = be_get_config(adapter);
+ status = be_get_resources(adapter);
if (status)
goto err;
+ adapter->pmac_id = kcalloc(be_max_uc(adapter),
+ sizeof(*adapter->pmac_id), GFP_KERNEL);
+ if (!adapter->pmac_id)
+ return -ENOMEM;
+
status = be_msix_enable(adapter);
if (status)
goto err;
@@ -4511,6 +4579,15 @@ static int be_setup(struct be_adapter *adapter)
be_cmd_set_logical_link_config(adapter,
IFLA_VF_LINK_STATE_AUTO, 0);
+ /* The BE3 EVB echoes broadcast/multicast packets back to the PF's vport,
+ * confusing any Linux bridge or OVS it might be connected to.
+ * Set the EVB to PASSTHRU mode which effectively disables the EVB
+ * when SRIOV is not enabled.
+ */
+ if (BE3_chip(adapter))
+ be_cmd_set_hsw_config(adapter, 0, 0, adapter->if_handle,
+ PORT_FWD_TYPE_PASSTHRU, 0);
+
if (adapter->num_vfs)
be_vf_setup(adapter);
@@ -4651,7 +4728,6 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
0, 0, nlflags, filter_mask, NULL);
}
-#ifdef CONFIG_BE2NET_VXLAN
/* VxLAN offload Notes:
*
* The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
@@ -4666,13 +4742,17 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
* adds more than one port, disable offloads and don't re-enable them again
* until after all the tunnels are removed.
*/
-static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
- __be16 port)
+static void be_add_vxlan_port(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct device *dev = &adapter->pdev->dev;
+ __be16 port = ti->port;
int status;
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
+
if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
return;
@@ -4720,10 +4800,14 @@ err:
be_disable_vxlan_offloads(adapter);
}
-static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
- __be16 port)
+static void be_del_vxlan_port(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ __be16 port = ti->port;
+
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
if (lancer_chip(adapter) || BEx_chip(adapter) || be_is_mc(adapter))
return;
@@ -4785,7 +4869,6 @@ static netdev_features_t be_features_check(struct sk_buff *skb,
return features;
}
-#endif
static int be_get_phys_port_id(struct net_device *dev,
struct netdev_phys_item_id *ppid)
@@ -4833,11 +4916,9 @@ static const struct net_device_ops be_netdev_ops = {
#ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = be_busy_poll,
#endif
-#ifdef CONFIG_BE2NET_VXLAN
- .ndo_add_vxlan_port = be_add_vxlan_port,
- .ndo_del_vxlan_port = be_del_vxlan_port,
+ .ndo_udp_tunnel_add = be_add_vxlan_port,
+ .ndo_udp_tunnel_del = be_del_vxlan_port,
.ndo_features_check = be_features_check,
-#endif
.ndo_get_phys_port_id = be_get_phys_port_id,
};
@@ -4890,11 +4971,13 @@ static int be_resume(struct be_adapter *adapter)
if (status)
return status;
- if (netif_running(netdev)) {
+ rtnl_lock();
+ if (netif_running(netdev))
status = be_open(netdev);
- if (status)
- return status;
- }
+ rtnl_unlock();
+
+ if (status)
+ return status;
netif_device_attach(netdev);
@@ -4994,6 +5077,10 @@ static void be_worker(struct work_struct *work)
struct be_rx_obj *rxo;
int i;
+ if (be_physfn(adapter) &&
+ MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
+ be_cmd_get_die_temperature(adapter);
+
/* when interrupts are not yet enabled, just reap any pending
* mcc completions
*/
@@ -5012,10 +5099,6 @@ static void be_worker(struct work_struct *work)
be_cmd_get_stats(adapter, &adapter->stats_cmd);
}
- if (be_physfn(adapter) &&
- MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
- be_cmd_get_die_temperature(adapter);
-
for_all_rx_queues(adapter, rxo, i) {
/* Replenish RX-queues starved due to memory
* allocation failures.
@@ -5408,9 +5491,6 @@ static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct be_adapter *adapter = pci_get_drvdata(pdev);
- if (adapter->wol_en)
- be_setup_wol(adapter, true);
-
be_intr_set(adapter, false);
be_cancel_err_detection(adapter);
@@ -5439,9 +5519,6 @@ static int be_pci_resume(struct pci_dev *pdev)
be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
- if (adapter->wol_en)
- be_setup_wol(adapter, false);
-
return 0;
}
@@ -5550,7 +5627,7 @@ err:
static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
struct be_adapter *adapter = pci_get_drvdata(pdev);
- u16 num_vf_qs;
+ struct be_resources vft_res = {0};
int status;
if (!num_vfs)
@@ -5573,9 +5650,10 @@ static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
* Also, this is done by FW in Lancer chip.
*/
if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
- num_vf_qs = be_calculate_vf_qs(adapter, adapter->num_vfs);
+ be_calculate_vf_res(adapter, adapter->num_vfs,
+ &vft_res);
status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
- adapter->num_vfs, num_vf_qs);
+ adapter->num_vfs, &vft_res);
if (status)
dev_err(&pdev->dev,
"Failed to optimize SR-IOV resources\n");
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c
index 4089156a7f5e..2b62841c4c63 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.c
+++ b/drivers/net/ethernet/emulex/benet/be_roce.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h
index fde609789483..e51719a7307f 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.h
+++ b/drivers/net/ethernet/emulex/benet/be_roce.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2015 Emulex
+ * Copyright (C) 2005 - 2016 Broadcom
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 41b010645100..c044667a0a25 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -192,7 +192,6 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
* @napi: NAPI structure
* @msg_enable: device state flags
* @lock: device lock
- * @phy: attached PHY
* @mdio: MDIO bus for PHY access
* @phy_id: address of attached PHY
*/
@@ -219,7 +218,6 @@ struct ethoc {
spinlock_t lock;
- struct phy_device *phy;
struct mii_bus *mdio;
struct clk *clk;
s8 phy_id;
@@ -694,7 +692,6 @@ static int ethoc_mdio_probe(struct net_device *dev)
return err;
}
- priv->phy = phy;
phy->advertising &= ~(ADVERTISED_1000baseT_Full |
ADVERTISED_1000baseT_Half);
phy->supported &= ~(SUPPORTED_1000baseT_Full |
@@ -724,7 +721,7 @@ static int ethoc_open(struct net_device *dev)
netif_start_queue(dev);
}
- phy_start(priv->phy);
+ phy_start(dev->phydev);
napi_enable(&priv->napi);
if (netif_msg_ifup(priv)) {
@@ -741,8 +738,8 @@ static int ethoc_stop(struct net_device *dev)
napi_disable(&priv->napi);
- if (priv->phy)
- phy_stop(priv->phy);
+ if (dev->phydev)
+ phy_stop(dev->phydev);
ethoc_disable_rx_and_tx(priv);
free_irq(dev->irq, dev);
@@ -770,7 +767,7 @@ static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (!phy)
return -ENODEV;
} else {
- phy = priv->phy;
+ phy = dev->phydev;
}
return phy_mii_ioctl(phy, ifr, cmd);
@@ -860,6 +857,11 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int entry;
void *dest;
+ if (skb_put_padto(skb, ETHOC_ZLEN)) {
+ dev->stats.tx_errors++;
+ goto out_no_free;
+ }
+
if (unlikely(skb->len > ETHOC_BUFSIZ)) {
dev->stats.tx_errors++;
goto out;
@@ -894,31 +896,10 @@ static netdev_tx_t ethoc_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
out:
dev_kfree_skb(skb);
+out_no_free:
return NETDEV_TX_OK;
}
-static int ethoc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct ethoc *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phy;
-
- if (!phydev)
- return -EOPNOTSUPP;
-
- return phy_ethtool_gset(phydev, cmd);
-}
-
-static int ethoc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct ethoc *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phy;
-
- if (!phydev)
- return -EOPNOTSUPP;
-
- return phy_ethtool_sset(phydev, cmd);
-}
-
static int ethoc_get_regs_len(struct net_device *netdev)
{
return ETH_END;
@@ -983,14 +964,14 @@ static int ethoc_set_ringparam(struct net_device *dev,
}
const struct ethtool_ops ethoc_ethtool_ops = {
- .get_settings = ethoc_get_settings,
- .set_settings = ethoc_set_settings,
.get_regs_len = ethoc_get_regs_len,
.get_regs = ethoc_get_regs,
.get_link = ethtool_op_get_link,
.get_ringparam = ethoc_get_ringparam,
.set_ringparam = ethoc_set_ringparam,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static const struct net_device_ops ethoc_netdev_ops = {
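The same conversion recurs in ftgmac100 further down: drop the driver-private phydev pointer, rely on the phydev that phy_connect() attaches to the net_device, and let the generic phylib helpers back the ksettings ops. The minimal pattern for any phylib-driven MAC looks like this sketch (struct name invented):

	static const struct ethtool_ops example_ethtool_ops = {
		.get_link		= ethtool_op_get_link,
		/* both helpers operate on dev->phydev */
		.get_link_ksettings	= phy_ethtool_get_link_ksettings,
		.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	};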
@@ -1086,7 +1067,7 @@ static int ethoc_probe(struct platform_device *pdev)
if (!priv->iobase) {
dev_err(&pdev->dev, "cannot remap I/O memory space\n");
ret = -ENXIO;
- goto error;
+ goto free;
}
if (netdev->mem_end) {
@@ -1095,7 +1076,7 @@ static int ethoc_probe(struct platform_device *pdev)
if (!priv->membase) {
dev_err(&pdev->dev, "cannot remap memory space\n");
ret = -ENXIO;
- goto error;
+ goto free;
}
} else {
/* Allocate buffer memory */
@@ -1106,7 +1087,7 @@ static int ethoc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "cannot allocate %dB buffer\n",
buffer_size);
ret = -ENOMEM;
- goto error;
+ goto free;
}
netdev->mem_end = netdev->mem_start + buffer_size;
priv->dma_alloc = buffer_size;
@@ -1120,7 +1101,7 @@ static int ethoc_probe(struct platform_device *pdev)
128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ);
if (num_bd < 4) {
ret = -ENODEV;
- goto error;
+ goto free;
}
priv->num_bd = num_bd;
/* num_tx must be a power of two */
@@ -1133,7 +1114,7 @@ static int ethoc_probe(struct platform_device *pdev)
priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void *), GFP_KERNEL);
if (!priv->vma) {
ret = -ENOMEM;
- goto error;
+ goto free;
}
/* Allow the platform setup code to pass in a MAC address. */
@@ -1195,7 +1176,7 @@ static int ethoc_probe(struct platform_device *pdev)
priv->mdio = mdiobus_alloc();
if (!priv->mdio) {
ret = -ENOMEM;
- goto free;
+ goto free2;
}
priv->mdio->name = "ethoc-mdio";
@@ -1208,7 +1189,7 @@ static int ethoc_probe(struct platform_device *pdev)
ret = mdiobus_register(priv->mdio);
if (ret) {
dev_err(&netdev->dev, "failed to register MDIO bus\n");
- goto free;
+ goto free2;
}
ret = ethoc_mdio_probe(netdev);
@@ -1241,9 +1222,10 @@ error2:
error:
mdiobus_unregister(priv->mdio);
mdiobus_free(priv->mdio);
-free:
+free2:
if (priv->clk)
clk_disable_unprepare(priv->clk);
+free:
free_netdev(netdev);
out:
return ret;
@@ -1260,8 +1242,7 @@ static int ethoc_remove(struct platform_device *pdev)
if (netdev) {
netif_napi_del(&priv->napi);
- phy_disconnect(priv->phy);
- priv->phy = NULL;
+ phy_disconnect(netdev->phydev);
if (priv->mdio) {
mdiobus_unregister(priv->mdio);
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 085f9125cf42..f928e6f79c89 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -24,6 +24,14 @@
#define DRV_NAME "nps_mgt_enet"
+static inline bool nps_enet_is_tx_pending(struct nps_enet_priv *priv)
+{
+ u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
+ u32 tx_ctrl_ct = (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT;
+
+ return (!tx_ctrl_ct && priv->tx_skb);
+}
+
static void nps_enet_clean_rx_fifo(struct net_device *ndev, u32 frame_len)
{
struct nps_enet_priv *priv = netdev_priv(ndev);
@@ -46,16 +54,17 @@ static void nps_enet_read_rx_fifo(struct net_device *ndev,
if (dst_is_aligned) {
ioread32_rep(priv->regs_base + NPS_ENET_REG_RX_BUF, reg, len);
reg += len;
- }
- else { /* !dst_is_aligned */
+ } else { /* !dst_is_aligned */
for (i = 0; i < len; i++, reg++) {
u32 buf = nps_enet_reg_get(priv, NPS_ENET_REG_RX_BUF);
+
put_unaligned_be32(buf, reg);
}
}
/* copy last bytes (if any) */
if (last) {
u32 buf;
+
ioread32_rep(priv->regs_base + NPS_ENET_REG_RX_BUF, &buf, 1);
memcpy((u8 *)reg, &buf, last);
}
@@ -140,12 +149,11 @@ static void nps_enet_tx_handler(struct net_device *ndev)
{
struct nps_enet_priv *priv = netdev_priv(ndev);
u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
- u32 tx_ctrl_ct = (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT;
u32 tx_ctrl_et = (tx_ctrl_value & TX_CTL_ET_MASK) >> TX_CTL_ET_SHIFT;
u32 tx_ctrl_nt = (tx_ctrl_value & TX_CTL_NT_MASK) >> TX_CTL_NT_SHIFT;
/* Check if we got TX */
- if (!priv->tx_skb || tx_ctrl_ct)
+ if (!nps_enet_is_tx_pending(priv))
return;
/* Ack Tx ctrl register */
@@ -183,9 +191,6 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
work_done = nps_enet_rx_handler(ndev);
if (work_done < budget) {
u32 buf_int_enable_value = 0;
- u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
- u32 tx_ctrl_ct =
- (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT;
napi_complete(napi);
@@ -204,9 +209,10 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
* the two code lines below will solve this situation by
* re-adding ourselves to the poll list.
*/
-
- if (priv->tx_skb && !tx_ctrl_ct)
+ if (nps_enet_is_tx_pending(priv)) {
+ nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
napi_reschedule(napi);
+ }
}
return work_done;
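This is the classic NAPI re-arm race: interrupts are re-enabled after napi_complete() returns, so any work that slipped in during that window must be picked up by re-checking the condition, masking the interrupt again, and rescheduling. A generic sketch of the pattern (all example_* names invented):

	static int example_poll(struct napi_struct *napi, int budget)
	{
		int work_done = example_rx(napi, budget);

		if (work_done < budget) {
			napi_complete(napi);
			example_irq_enable();		/* race window opens */
			if (example_work_pending()) {	/* new work slipped in? */
				example_irq_disable();	/* mask again... */
				napi_reschedule(napi);	/* ...and poll once more */
			}
		}
		return work_done;
	}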
@@ -228,11 +234,9 @@ static irqreturn_t nps_enet_irq_handler(s32 irq, void *dev_instance)
struct net_device *ndev = dev_instance;
struct nps_enet_priv *priv = netdev_priv(ndev);
u32 rx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_RX_CTL);
- u32 tx_ctrl_value = nps_enet_reg_get(priv, NPS_ENET_REG_TX_CTL);
- u32 tx_ctrl_ct = (tx_ctrl_value & TX_CTL_CT_MASK) >> TX_CTL_CT_SHIFT;
u32 rx_ctrl_cr = (rx_ctrl_value & RX_CTL_CR_MASK) >> RX_CTL_CR_SHIFT;
- if ((!tx_ctrl_ct && priv->tx_skb) || rx_ctrl_cr)
+ if (nps_enet_is_tx_pending(priv) || rx_ctrl_cr)
if (likely(napi_schedule_prep(&priv->napi))) {
nps_enet_reg_set(priv, NPS_ENET_REG_BUF_INT_ENABLE, 0);
__napi_schedule(&priv->napi);
@@ -283,6 +287,7 @@ static void nps_enet_hw_reset(struct net_device *ndev)
ge_rst_value |= NPS_ENET_ENABLE << RST_GMAC_0_SHIFT;
nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value);
usleep_range(10, 20);
+ ge_rst_value = 0;
nps_enet_reg_set(priv, NPS_ENET_REG_GE_RST, ge_rst_value);
/* Tx fifo reset sequence */
@@ -457,7 +462,6 @@ static void nps_enet_set_rx_mode(struct net_device *ndev)
| NPS_ENET_ENABLE << CFG_2_DISK_DA_SHIFT;
ge_mac_cfg_2_value = (ge_mac_cfg_2_value & ~CFG_2_DISK_MC_MASK)
| NPS_ENET_ENABLE << CFG_2_DISK_MC_SHIFT;
-
}
nps_enet_reg_set(priv, NPS_ENET_REG_GE_MAC_CFG_2, ge_mac_cfg_2_value);
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 84384e1585a5..36361f8bf894 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -31,6 +31,7 @@
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <net/ip.h>
+#include <net/ncsi.h>
#include "ftgmac100.h"
@@ -68,11 +69,14 @@ struct ftgmac100 {
struct net_device *netdev;
struct device *dev;
+ struct ncsi_dev *ndev;
struct napi_struct napi;
struct mii_bus *mii_bus;
- struct phy_device *phydev;
int old_speed;
+ int int_mask_all;
+ bool use_ncsi;
+ bool enabled;
};
static int ftgmac100_alloc_rx_page(struct ftgmac100 *priv,
@@ -81,14 +85,6 @@ static int ftgmac100_alloc_rx_page(struct ftgmac100 *priv,
/******************************************************************************
* internal functions (hardware register access)
*****************************************************************************/
-#define INT_MASK_ALL_ENABLED (FTGMAC100_INT_RPKT_LOST | \
- FTGMAC100_INT_XPKT_ETH | \
- FTGMAC100_INT_XPKT_LOST | \
- FTGMAC100_INT_AHB_ERR | \
- FTGMAC100_INT_PHYSTS_CHG | \
- FTGMAC100_INT_RPKT_BUF | \
- FTGMAC100_INT_NO_RXBUF)
-
static void ftgmac100_set_rx_ring_base(struct ftgmac100 *priv, dma_addr_t addr)
{
iowrite32(addr, priv->base + FTGMAC100_OFFSET_RXR_BADR);
@@ -142,6 +138,55 @@ static void ftgmac100_set_mac(struct ftgmac100 *priv, const unsigned char *mac)
iowrite32(laddr, priv->base + FTGMAC100_OFFSET_MAC_LADR);
}
+static void ftgmac100_setup_mac(struct ftgmac100 *priv)
+{
+ u8 mac[ETH_ALEN];
+ unsigned int m;
+ unsigned int l;
+ void *addr;
+
+ addr = device_get_mac_address(priv->dev, mac, ETH_ALEN);
+ if (addr) {
+ ether_addr_copy(priv->netdev->dev_addr, mac);
+ dev_info(priv->dev, "Read MAC address %pM from device tree\n",
+ mac);
+ return;
+ }
+
+ m = ioread32(priv->base + FTGMAC100_OFFSET_MAC_MADR);
+ l = ioread32(priv->base + FTGMAC100_OFFSET_MAC_LADR);
+
+ mac[0] = (m >> 8) & 0xff;
+ mac[1] = m & 0xff;
+ mac[2] = (l >> 24) & 0xff;
+ mac[3] = (l >> 16) & 0xff;
+ mac[4] = (l >> 8) & 0xff;
+ mac[5] = l & 0xff;
+
+ if (is_valid_ether_addr(mac)) {
+ ether_addr_copy(priv->netdev->dev_addr, mac);
+ dev_info(priv->dev, "Read MAC address %pM from chip\n", mac);
+ } else {
+ eth_hw_addr_random(priv->netdev);
+ dev_info(priv->dev, "Generated random MAC address %pM\n",
+ priv->netdev->dev_addr);
+ }
+}
+
+static int ftgmac100_set_mac_addr(struct net_device *dev, void *p)
+{
+ int ret;
+
+ ret = eth_prepare_mac_addr_change(dev, p);
+ if (ret < 0)
+ return ret;
+
+ eth_commit_mac_addr_change(dev, p);
+ ftgmac100_set_mac(netdev_priv(dev), dev->dev_addr);
+
+ return 0;
+}
+
static void ftgmac100_init_hw(struct ftgmac100 *priv)
{
/* setup ring buffer base registers */
@@ -807,7 +852,7 @@ err:
static void ftgmac100_adjust_link(struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
- struct phy_device *phydev = priv->phydev;
+ struct phy_device *phydev = netdev->phydev;
int ier;
if (phydev->speed == priv->old_speed)
@@ -850,7 +895,6 @@ static int ftgmac100_mii_probe(struct ftgmac100 *priv)
return PTR_ERR(phydev);
}
- priv->phydev = phydev;
return 0;
}
@@ -939,27 +983,11 @@ static void ftgmac100_get_drvinfo(struct net_device *netdev,
strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
}
-static int ftgmac100_get_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
-{
- struct ftgmac100 *priv = netdev_priv(netdev);
-
- return phy_ethtool_gset(priv->phydev, cmd);
-}
-
-static int ftgmac100_set_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
-{
- struct ftgmac100 *priv = netdev_priv(netdev);
-
- return phy_ethtool_sset(priv->phydev, cmd);
-}
-
static const struct ethtool_ops ftgmac100_ethtool_ops = {
- .set_settings = ftgmac100_set_settings,
- .get_settings = ftgmac100_get_settings,
.get_drvinfo = ftgmac100_get_drvinfo,
.get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
/******************************************************************************
@@ -970,7 +998,10 @@ static irqreturn_t ftgmac100_interrupt(int irq, void *dev_id)
struct net_device *netdev = dev_id;
struct ftgmac100 *priv = netdev_priv(netdev);
- if (likely(netif_running(netdev))) {
+ /* When running in NCSI mode, the interface should be ready for
+ * receiving or transmitting NCSI packets before it's opened.
+ */
+ if (likely(priv->use_ncsi || netif_running(netdev))) {
/* Disable interrupts for polling */
iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
napi_schedule(&priv->napi);
@@ -1023,8 +1054,9 @@ static int ftgmac100_poll(struct napi_struct *napi, int budget)
ftgmac100_tx_complete(priv);
}
- if (status & (FTGMAC100_INT_NO_RXBUF | FTGMAC100_INT_RPKT_LOST |
- FTGMAC100_INT_AHB_ERR | FTGMAC100_INT_PHYSTS_CHG)) {
+ if (status & priv->int_mask_all & (FTGMAC100_INT_NO_RXBUF |
+ FTGMAC100_INT_RPKT_LOST | FTGMAC100_INT_AHB_ERR |
+ FTGMAC100_INT_PHYSTS_CHG)) {
if (net_ratelimit())
netdev_info(netdev, "[ISR] = 0x%x: %s%s%s%s\n", status,
status & FTGMAC100_INT_NO_RXBUF ? "NO_RXBUF " : "",
@@ -1047,7 +1079,8 @@ static int ftgmac100_poll(struct napi_struct *napi, int budget)
napi_complete(napi);
/* enable all interrupts */
- iowrite32(INT_MASK_ALL_ENABLED, priv->base + FTGMAC100_OFFSET_IER);
+ iowrite32(priv->int_mask_all,
+ priv->base + FTGMAC100_OFFSET_IER);
}
return rx;
@@ -1083,17 +1116,33 @@ static int ftgmac100_open(struct net_device *netdev)
goto err_hw;
ftgmac100_init_hw(priv);
- ftgmac100_start_hw(priv, 10);
-
- phy_start(priv->phydev);
+ ftgmac100_start_hw(priv, priv->use_ncsi ? 100 : 10);
+ if (netdev->phydev)
+ phy_start(netdev->phydev);
+ else if (priv->use_ncsi)
+ netif_carrier_on(netdev);
napi_enable(&priv->napi);
netif_start_queue(netdev);
/* enable all interrupts */
- iowrite32(INT_MASK_ALL_ENABLED, priv->base + FTGMAC100_OFFSET_IER);
+ iowrite32(priv->int_mask_all, priv->base + FTGMAC100_OFFSET_IER);
+
+ /* Start the NCSI device */
+ if (priv->use_ncsi) {
+ err = ncsi_start_dev(priv->ndev);
+ if (err)
+ goto err_ncsi;
+ }
+
+ priv->enabled = true;
+
return 0;
+err_ncsi:
+ napi_disable(&priv->napi);
+ netif_stop_queue(netdev);
+ iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
err_hw:
free_irq(priv->irq, netdev);
err_irq:
@@ -1106,12 +1155,17 @@ static int ftgmac100_stop(struct net_device *netdev)
{
struct ftgmac100 *priv = netdev_priv(netdev);
+ if (!priv->enabled)
+ return 0;
+
/* disable all interrupts */
+ priv->enabled = false;
iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
netif_stop_queue(netdev);
napi_disable(&priv->napi);
- phy_stop(priv->phydev);
+ if (netdev->phydev)
+ phy_stop(netdev->phydev);
ftgmac100_stop_hw(priv);
free_irq(priv->irq, netdev);
@@ -1152,20 +1206,84 @@ static int ftgmac100_hard_start_xmit(struct sk_buff *skb,
/* optional */
static int ftgmac100_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
- struct ftgmac100 *priv = netdev_priv(netdev);
+ if (!netdev->phydev)
+ return -ENXIO;
- return phy_mii_ioctl(priv->phydev, ifr, cmd);
+ return phy_mii_ioctl(netdev->phydev, ifr, cmd);
}
static const struct net_device_ops ftgmac100_netdev_ops = {
.ndo_open = ftgmac100_open,
.ndo_stop = ftgmac100_stop,
.ndo_start_xmit = ftgmac100_hard_start_xmit,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = ftgmac100_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = ftgmac100_do_ioctl,
};
+static int ftgmac100_setup_mdio(struct net_device *netdev)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+ struct platform_device *pdev = to_platform_device(priv->dev);
+ int i, err = 0;
+
+ /* initialize mdio bus */
+ priv->mii_bus = mdiobus_alloc();
+ if (!priv->mii_bus)
+ return -EIO;
+
+ priv->mii_bus->name = "ftgmac100_mdio";
+ snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d",
+ pdev->name, pdev->id);
+ priv->mii_bus->priv = priv->netdev;
+ priv->mii_bus->read = ftgmac100_mdiobus_read;
+ priv->mii_bus->write = ftgmac100_mdiobus_write;
+
+ for (i = 0; i < PHY_MAX_ADDR; i++)
+ priv->mii_bus->irq[i] = PHY_POLL;
+
+ err = mdiobus_register(priv->mii_bus);
+ if (err) {
+ dev_err(priv->dev, "Cannot register MDIO bus!\n");
+ goto err_register_mdiobus;
+ }
+
+ err = ftgmac100_mii_probe(priv);
+ if (err) {
+ dev_err(priv->dev, "MII Probe failed!\n");
+ goto err_mii_probe;
+ }
+
+ return 0;
+
+err_mii_probe:
+ mdiobus_unregister(priv->mii_bus);
+err_register_mdiobus:
+ mdiobus_free(priv->mii_bus);
+ return err;
+}
+
+static void ftgmac100_destroy_mdio(struct net_device *netdev)
+{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+
+ if (!netdev->phydev)
+ return;
+
+ phy_disconnect(netdev->phydev);
+ mdiobus_unregister(priv->mii_bus);
+ mdiobus_free(priv->mii_bus);
+}
+
+static void ftgmac100_ncsi_handler(struct ncsi_dev *nd)
+{
+ if (unlikely(nd->state != ncsi_dev_state_functional))
+ return;
+
+ netdev_info(nd->dev, "NCSI interface %s\n",
+ nd->link_up ? "up" : "down");
+}
+
/******************************************************************************
* struct platform_driver functions
*****************************************************************************/
@@ -1175,7 +1293,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
int irq;
struct net_device *netdev;
struct ftgmac100 *priv;
- int err;
+ int err = 0;
if (!pdev)
return -ENODEV;
@@ -1199,7 +1317,6 @@ static int ftgmac100_probe(struct platform_device *pdev)
netdev->ethtool_ops = &ftgmac100_ethtool_ops;
netdev->netdev_ops = &ftgmac100_netdev_ops;
- netdev->features = NETIF_F_IP_CSUM | NETIF_F_GRO;
platform_set_drvdata(pdev, netdev);
@@ -1231,31 +1348,45 @@ static int ftgmac100_probe(struct platform_device *pdev)
priv->irq = irq;
- /* initialize mdio bus */
- priv->mii_bus = mdiobus_alloc();
- if (!priv->mii_bus) {
- err = -EIO;
- goto err_alloc_mdiobus;
- }
-
- priv->mii_bus->name = "ftgmac100_mdio";
- snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "ftgmac100_mii");
-
- priv->mii_bus->priv = netdev;
- priv->mii_bus->read = ftgmac100_mdiobus_read;
- priv->mii_bus->write = ftgmac100_mdiobus_write;
+ /* MAC address from chip or random one */
+ ftgmac100_setup_mac(priv);
+
+ priv->int_mask_all = (FTGMAC100_INT_RPKT_LOST |
+ FTGMAC100_INT_XPKT_ETH |
+ FTGMAC100_INT_XPKT_LOST |
+ FTGMAC100_INT_AHB_ERR |
+ FTGMAC100_INT_PHYSTS_CHG |
+ FTGMAC100_INT_RPKT_BUF |
+ FTGMAC100_INT_NO_RXBUF);
+ if (pdev->dev.of_node &&
+ of_get_property(pdev->dev.of_node, "use-ncsi", NULL)) {
+ if (!IS_ENABLED(CONFIG_NET_NCSI)) {
+ dev_err(&pdev->dev, "NCSI stack not enabled\n");
+ goto err_ncsi_dev;
+ }
- err = mdiobus_register(priv->mii_bus);
- if (err) {
- dev_err(&pdev->dev, "Cannot register MDIO bus!\n");
- goto err_register_mdiobus;
+ dev_info(&pdev->dev, "Using NCSI interface\n");
+ priv->use_ncsi = true;
+ priv->int_mask_all &= ~FTGMAC100_INT_PHYSTS_CHG;
+ priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler);
+ if (!priv->ndev)
+ goto err_ncsi_dev;
+ } else {
+ priv->use_ncsi = false;
+ err = ftgmac100_setup_mdio(netdev);
+ if (err)
+ goto err_setup_mdio;
}
- err = ftgmac100_mii_probe(priv);
- if (err) {
- dev_err(&pdev->dev, "MII Probe failed!\n");
- goto err_mii_probe;
- }
+ /* We have to disable on-chip IP checksum functionality
+ * when NCSI is enabled on the interface. It doesn't work
+ * in that case.
+ */
+ netdev->features = NETIF_F_IP_CSUM | NETIF_F_GRO;
+ if (priv->use_ncsi &&
+ of_get_property(pdev->dev.of_node, "no-hw-checksum", NULL))
+ netdev->features &= ~NETIF_F_IP_CSUM;
+
/* register network device */
err = register_netdev(netdev);
@@ -1266,21 +1397,12 @@ static int ftgmac100_probe(struct platform_device *pdev)
netdev_info(netdev, "irq %d, mapped at %p\n", priv->irq, priv->base);
- if (!is_valid_ether_addr(netdev->dev_addr)) {
- eth_hw_addr_random(netdev);
- netdev_info(netdev, "generated random MAC address %pM\n",
- netdev->dev_addr);
- }
-
return 0;
+err_ncsi_dev:
err_register_netdev:
- phy_disconnect(priv->phydev);
-err_mii_probe:
- mdiobus_unregister(priv->mii_bus);
-err_register_mdiobus:
- mdiobus_free(priv->mii_bus);
-err_alloc_mdiobus:
+ ftgmac100_destroy_mdio(netdev);
+err_setup_mdio:
iounmap(priv->base);
err_ioremap:
release_resource(priv->res);
@@ -1300,10 +1422,7 @@ static int __exit ftgmac100_remove(struct platform_device *pdev)
priv = netdev_priv(netdev);
unregister_netdev(netdev);
-
- phy_disconnect(priv->phydev);
- mdiobus_unregister(priv->mii_bus);
- mdiobus_free(priv->mii_bus);
+ ftgmac100_destroy_mdio(netdev);
iounmap(priv->base);
release_resource(priv->res);
@@ -1313,14 +1432,20 @@ static int __exit ftgmac100_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id ftgmac100_of_match[] = {
+ { .compatible = "faraday,ftgmac100" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ftgmac100_of_match);
+
static struct platform_driver ftgmac100_driver = {
- .probe = ftgmac100_probe,
- .remove = __exit_p(ftgmac100_remove),
- .driver = {
- .name = DRV_NAME,
+ .probe = ftgmac100_probe,
+ .remove = __exit_p(ftgmac100_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = ftgmac100_of_match,
},
};
-
module_platform_driver(ftgmac100_driver);
MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>");
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index b1b9ebafb354..c08bd763172a 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -1227,7 +1227,7 @@ static void fealnx_tx_timeout(struct net_device *dev)
spin_unlock_irqrestore(&np->lock, flags);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
dev->stats.tx_errors++;
netif_wake_queue(dev); /* or .._start_.. ?? */
}
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 195122e11f10..c865135f3cb9 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -442,6 +442,10 @@ struct bufdesc_ex {
#define FEC_QUIRK_SINGLE_MDIO (1 << 11)
/* Controller supports RACC register */
#define FEC_QUIRK_HAS_RACC (1 << 12)
+/* Controller supports interrupt coalescing */
+#define FEC_QUIRK_HAS_COALESCE (1 << 13)
+/* Interrupt doesn't wake CPU from deep idle */
+#define FEC_QUIRK_ERR006687 (1 << 14)
struct bufdesc_prop {
int qid;
@@ -517,7 +521,6 @@ struct fec_enet_private {
/* Phylib and MDIO interface */
struct mii_bus *mii_bus;
- struct phy_device *phy_dev;
int mii_timeout;
uint phy_speed;
phy_interface_t phy_interface;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 2a03857cca18..692ee248e486 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -60,6 +60,7 @@
#include <linux/if_vlan.h>
#include <linux/pinctrl/consumer.h>
#include <linux/prefetch.h>
+#include <soc/imx/cpuidle.h>
#include <asm/cacheflush.h>
@@ -88,10 +89,10 @@ static struct platform_device_id fec_devtype[] = {
.driver_data = 0,
}, {
.name = "imx25-fec",
- .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_HAS_RACC,
+ .driver_data = FEC_QUIRK_USE_GASKET,
}, {
.name = "imx27-fec",
- .driver_data = FEC_QUIRK_HAS_RACC,
+ .driver_data = 0,
}, {
.name = "imx28-fec",
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
@@ -111,7 +112,13 @@ static struct platform_device_id fec_devtype[] = {
FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
- FEC_QUIRK_HAS_RACC,
+ FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
+ }, {
+ .name = "imx6ul-fec",
+ .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_BUG_CAPTURE |
+ FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE,
}, {
/* sentinel */
}
@@ -125,6 +132,7 @@ enum imx_fec_type {
IMX6Q_FEC,
MVF600_FEC,
IMX6SX_FEC,
+ IMX6UL_FEC,
};
static const struct of_device_id fec_dt_ids[] = {
@@ -134,6 +142,7 @@ static const struct of_device_id fec_dt_ids[] = {
{ .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
{ .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
{ .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], },
+ { .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);
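Each compatible string above carries a per-SoC quirk bitmask in its match data; probe code fetches it once and the rest of the driver only tests individual FEC_QUIRK_* bits. A sketch of the retrieval side, assuming of_device_get_match_data() carries the raw bitmask (the foo_* names are hypothetical):

	static int foo_probe(struct platform_device *pdev)
	{
		unsigned long quirks;

		/* quirk bits come from the matched of_device_id's .data */
		quirks = (unsigned long)of_device_get_match_data(&pdev->dev);

		if (quirks & FEC_QUIRK_HAS_COALESCE)
			foo_setup_coalesce(pdev);	/* hypothetical helper */

		return 0;
	}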
@@ -171,6 +180,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
/* FEC receive acceleration */
#define FEC_RACC_IPDIS (1 << 1)
#define FEC_RACC_PRODIS (1 << 2)
+#define FEC_RACC_SHIFT16 BIT(7)
#define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS)
/*
@@ -936,9 +946,11 @@ fec_restart(struct net_device *ndev)
#if !defined(CONFIG_M5272)
if (fep->quirks & FEC_QUIRK_HAS_RACC) {
- /* set RX checksum */
val = readl(fep->hwp + FEC_RACC);
+ /* align IP header */
+ val |= FEC_RACC_SHIFT16;
if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
+ /* set RX checksum */
val |= FEC_RACC_OPTIONS;
else
val &= ~FEC_RACC_OPTIONS;
@@ -967,10 +979,10 @@ fec_restart(struct net_device *ndev)
rcntl &= ~(1 << 8);
/* 1G, 100M or 10M */
- if (fep->phy_dev) {
- if (fep->phy_dev->speed == SPEED_1000)
+ if (ndev->phydev) {
+ if (ndev->phydev->speed == SPEED_1000)
ecntl |= (1 << 5);
- else if (fep->phy_dev->speed == SPEED_100)
+ else if (ndev->phydev->speed == SPEED_100)
rcntl &= ~(1 << 9);
else
rcntl |= (1 << 9);
@@ -991,7 +1003,7 @@ fec_restart(struct net_device *ndev)
*/
cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
- if (fep->phy_dev && fep->phy_dev->speed == SPEED_10)
+ if (ndev->phydev && ndev->phydev->speed == SPEED_10)
cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);
@@ -1005,7 +1017,7 @@ fec_restart(struct net_device *ndev)
/* enable pause frame*/
if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
- fep->phy_dev && fep->phy_dev->pause)) {
+ ndev->phydev && ndev->phydev->pause)) {
rcntl |= FEC_ENET_FCE;
/* set FIFO threshold parameter to reduce overrun */
@@ -1197,10 +1209,8 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
fec16_to_cpu(bdp->cbd_datlen),
DMA_TO_DEVICE);
bdp->cbd_bufaddr = cpu_to_fec32(0);
- if (!skb) {
- bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
- continue;
- }
+ if (!skb)
+ goto skb_done;
/* Check for errors. */
if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -1239,7 +1249,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
/* Free the sk buffer associated with this last transmit */
dev_kfree_skb_any(skb);
-
+skb_done:
/* Make sure the update to bdp and tx_skbuff are performed
* before dirty_tx
*/
@@ -1421,6 +1431,12 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
prefetch(skb->data - NET_IP_ALIGN);
skb_put(skb, pkt_len - 4);
data = skb->data;
+
+#if !defined(CONFIG_M5272)
+ if (fep->quirks & FEC_QUIRK_HAS_RACC)
+ data = skb_pull_inline(skb, 2);
+#endif
+
if (!is_copybreak && need_swap)
swap_buffer(data, pkt_len);
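The SHIFT16 bit makes the MAC's receive DMA insert two padding bytes in front of every frame, so the 14-byte Ethernet header ends at offset 16 and the IP header starts on a 4-byte boundary; the skb_pull above then discards the pad before the stack sees the frame. Worked offsets:

	/*
	 * Without the pad:  Ethernet header at 0..13, IP header at 14
	 *                   (misaligned for 32-bit loads)
	 * With SHIFT16:     pad at 0..1, Ethernet header at 2..15,
	 *                   IP header at 16 (4-byte aligned);
	 *                   skb_pull_inline(skb, 2) removes the pad.
	 */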
@@ -1685,7 +1701,7 @@ static void fec_get_mac(struct net_device *ndev)
static void fec_enet_adjust_link(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct phy_device *phy_dev = fep->phy_dev;
+ struct phy_device *phy_dev = ndev->phydev;
int status_change = 0;
/* Prevent a state halted on mii error */
@@ -1885,8 +1901,6 @@ static int fec_enet_mii_probe(struct net_device *ndev)
int phy_id;
int dev_id = fep->dev_id;
- fep->phy_dev = NULL;
-
if (fep->phy_node) {
phy_dev = of_phy_connect(ndev, fep->phy_node,
&fec_enet_adjust_link, 0,
@@ -1934,7 +1948,6 @@ static int fec_enet_mii_probe(struct net_device *ndev)
phy_dev->advertising = phy_dev->supported;
- fep->phy_dev = phy_dev;
fep->link = 0;
fep->full_duplex = 0;
@@ -2064,30 +2077,6 @@ static void fec_enet_mii_remove(struct fec_enet_private *fep)
}
}
-static int fec_enet_get_settings(struct net_device *ndev,
- struct ethtool_cmd *cmd)
-{
- struct fec_enet_private *fep = netdev_priv(ndev);
- struct phy_device *phydev = fep->phy_dev;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_ethtool_gset(phydev, cmd);
-}
-
-static int fec_enet_set_settings(struct net_device *ndev,
- struct ethtool_cmd *cmd)
-{
- struct fec_enet_private *fep = netdev_priv(ndev);
- struct phy_device *phydev = fep->phy_dev;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_ethtool_sset(phydev, cmd);
-}
-
static void fec_enet_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
@@ -2220,7 +2209,7 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
{
struct fec_enet_private *fep = netdev_priv(ndev);
- if (!fep->phy_dev)
+ if (!ndev->phydev)
return -ENODEV;
if (pause->tx_pause != pause->rx_pause) {
@@ -2236,17 +2225,17 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;
if (pause->rx_pause || pause->autoneg) {
- fep->phy_dev->supported |= ADVERTISED_Pause;
- fep->phy_dev->advertising |= ADVERTISED_Pause;
+ ndev->phydev->supported |= ADVERTISED_Pause;
+ ndev->phydev->advertising |= ADVERTISED_Pause;
} else {
- fep->phy_dev->supported &= ~ADVERTISED_Pause;
- fep->phy_dev->advertising &= ~ADVERTISED_Pause;
+ ndev->phydev->supported &= ~ADVERTISED_Pause;
+ ndev->phydev->advertising &= ~ADVERTISED_Pause;
}
if (pause->autoneg) {
if (netif_running(ndev))
fec_stop(ndev);
- phy_start_aneg(fep->phy_dev);
+ phy_start_aneg(ndev->phydev);
}
if (netif_running(ndev)) {
napi_disable(&fep->napi);
@@ -2362,8 +2351,7 @@ static int fec_enet_get_sset_count(struct net_device *dev, int sset)
static int fec_enet_nway_reset(struct net_device *dev)
{
- struct fec_enet_private *fep = netdev_priv(dev);
- struct phy_device *phydev = fep->phy_dev;
+ struct phy_device *phydev = dev->phydev;
if (!phydev)
return -ENODEV;
@@ -2388,9 +2376,6 @@ static void fec_enet_itr_coal_set(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
int rx_itr, tx_itr;
- if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
- return;
-
/* Must be greater than zero to avoid unpredictable behavior */
if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
!fep->tx_time_itr || !fep->tx_pkts_itr)
@@ -2413,10 +2398,12 @@ static void fec_enet_itr_coal_set(struct net_device *ndev)
writel(tx_itr, fep->hwp + FEC_TXIC0);
writel(rx_itr, fep->hwp + FEC_RXIC0);
- writel(tx_itr, fep->hwp + FEC_TXIC1);
- writel(rx_itr, fep->hwp + FEC_RXIC1);
- writel(tx_itr, fep->hwp + FEC_TXIC2);
- writel(rx_itr, fep->hwp + FEC_RXIC2);
+ if (fep->quirks & FEC_QUIRK_HAS_AVB) {
+ writel(tx_itr, fep->hwp + FEC_TXIC1);
+ writel(rx_itr, fep->hwp + FEC_RXIC1);
+ writel(tx_itr, fep->hwp + FEC_TXIC2);
+ writel(rx_itr, fep->hwp + FEC_RXIC2);
+ }
}
static int
@@ -2424,7 +2411,7 @@ fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
+ if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
return -EOPNOTSUPP;
ec->rx_coalesce_usecs = fep->rx_time_itr;
@@ -2442,28 +2429,28 @@ fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
struct fec_enet_private *fep = netdev_priv(ndev);
unsigned int cycle;
- if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
+ if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
return -EOPNOTSUPP;
if (ec->rx_max_coalesced_frames > 255) {
- pr_err("Rx coalesced frames exceed hardware limiation");
+ pr_err("Rx coalesced frames exceed hardware limitation\n");
return -EINVAL;
}
if (ec->tx_max_coalesced_frames > 255) {
- pr_err("Tx coalesced frame exceed hardware limiation");
+ pr_err("Tx coalesced frame exceed hardware limitation\n");
return -EINVAL;
}
cycle = fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr);
if (cycle > 0xFFFF) {
- pr_err("Rx coalesed usec exceeed hardware limiation");
+ pr_err("Rx coalesced usec exceed hardware limitation\n");
return -EINVAL;
}
cycle = fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr);
if (cycle > 0xFFFF) {
- pr_err("Rx coalesed usec exceeed hardware limiation");
+ pr_err("Rx coalesced usec exceed hardware limitation\n");
return -EINVAL;
}
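Both time parameters go through the same microseconds-to-ticks conversion and must fit the controller's 16-bit timer field, which is what the 0xFFFF checks enforce. A sketch of the validation factored into one helper, reusing fec_enet_us_to_itr_clock() from above (check_itr_range itself is hypothetical):

	static int check_itr_range(struct net_device *ndev, unsigned int usecs,
				   const char *dir)
	{
		unsigned int cycle = fec_enet_us_to_itr_clock(ndev, usecs);

		if (cycle > 0xFFFF) {	/* ITR timer field is 16 bits wide */
			pr_err("%s coalesced usecs exceed hardware limitation\n",
			       dir);
			return -EINVAL;
		}
		return 0;
	}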
@@ -2568,8 +2555,6 @@ fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
}
static const struct ethtool_ops fec_enet_ethtool_ops = {
- .get_settings = fec_enet_get_settings,
- .set_settings = fec_enet_set_settings,
.get_drvinfo = fec_enet_get_drvinfo,
.get_regs_len = fec_enet_get_regs_len,
.get_regs = fec_enet_get_regs,
@@ -2589,12 +2574,14 @@ static const struct ethtool_ops fec_enet_ethtool_ops = {
.set_tunable = fec_enet_set_tunable,
.get_wol = fec_enet_get_wol,
.set_wol = fec_enet_set_wol,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- struct phy_device *phydev = fep->phy_dev;
+ struct phy_device *phydev = ndev->phydev;
if (!netif_running(ndev))
return -EINVAL;
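This hunk is the template for the whole series: the driver-private phy_dev pointer goes away, the attached PHY is reached through ndev->phydev, and the hand-rolled get_settings/set_settings wrappers are replaced by the generic helpers, which read ndev->phydev themselves and return -ENODEV when no PHY is attached. A fully converted ethtool_ops can shrink to:

	static const struct ethtool_ops foo_ethtool_ops = {
		.get_link		= ethtool_op_get_link,
		/* generic helpers operate on ndev->phydev directly */
		.get_link_ksettings	= phy_ethtool_get_link_ksettings,
		.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	};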
@@ -2848,8 +2835,11 @@ fec_enet_open(struct net_device *ndev)
if (ret)
goto err_enet_mii_probe;
+ if (fep->quirks & FEC_QUIRK_ERR006687)
+ imx6q_cpuidle_fec_irqs_used();
+
napi_enable(&fep->napi);
- phy_start(fep->phy_dev);
+ phy_start(ndev->phydev);
netif_tx_start_all_queues(ndev);
device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
@@ -2873,7 +2863,7 @@ fec_enet_close(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- phy_stop(fep->phy_dev);
+ phy_stop(ndev->phydev);
if (netif_device_present(ndev)) {
napi_disable(&fep->napi);
@@ -2881,8 +2871,10 @@ fec_enet_close(struct net_device *ndev)
fec_stop(ndev);
}
- phy_disconnect(fep->phy_dev);
- fep->phy_dev = NULL;
+ phy_disconnect(ndev->phydev);
+
+ if (fep->quirks & FEC_QUIRK_ERR006687)
+ imx6q_cpuidle_fec_irqs_unused();
fec_enet_clk_enable(ndev, false);
pinctrl_pm_select_sleep_state(&fep->pdev->dev);
@@ -3222,7 +3214,12 @@ static void fec_reset_phy(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
return;
}
- msleep(msec);
+
+ if (msec > 20)
+ msleep(msec);
+ else
+ usleep_range(msec * 1000, msec * 1000 + 1000);
+
gpio_set_value_cansleep(phy_reset, !active_high);
}
#else /* CONFIG_OF */
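The split follows the kernel's timer guidelines (Documentation/timers/timers-howto.txt): msleep() rounds up to jiffies and can oversleep badly for short waits, so delays of roughly 20 ms and below should use the hrtimer-backed usleep_range() with an explicit slack window. The same decision as a reusable sketch (name hypothetical):

	static void foo_sleep_ms(unsigned int msec)
	{
		if (msec > 20)
			msleep(msec);	/* jiffies granularity is acceptable */
		else			/* 1 ms slack helps timer coalescing */
			usleep_range(msec * 1000, msec * 1000 + 1000);
	}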
@@ -3323,6 +3320,11 @@ fec_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
+ if ((of_machine_is_compatible("fsl,imx6q") ||
+ of_machine_is_compatible("fsl,imx6dl")) &&
+ !of_property_read_bool(np, "fsl,err006687-workaround-present"))
+ fep->quirks |= FEC_QUIRK_ERR006687;
+
if (of_get_property(np, "fsl,magic-packet", NULL))
fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
@@ -3510,7 +3512,7 @@ static int __maybe_unused fec_suspend(struct device *dev)
if (netif_running(ndev)) {
if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
- phy_stop(fep->phy_dev);
+ phy_stop(ndev->phydev);
napi_disable(&fep->napi);
netif_tx_lock_bh(ndev);
netif_device_detach(ndev);
@@ -3570,7 +3572,7 @@ static int __maybe_unused fec_resume(struct device *dev)
netif_device_attach(ndev);
netif_tx_unlock_bh(ndev);
napi_enable(&fep->napi);
- phy_start(fep->phy_dev);
+ phy_start(ndev->phydev);
}
rtnl_unlock();
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 25553ee857b4..446ae9d60c71 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -66,7 +66,6 @@ struct mpc52xx_fec_priv {
/* MDIO link details */
unsigned int mdio_speed;
struct device_node *phy_node;
- struct phy_device *phydev;
enum phy_state link;
int seven_wire_mode;
};
@@ -165,7 +164,7 @@ static int mpc52xx_fec_alloc_rx_buffers(struct net_device *dev, struct bcom_task
static void mpc52xx_fec_adjust_link(struct net_device *dev)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
+ struct phy_device *phydev = dev->phydev;
int new_state = 0;
if (phydev->link != PHY_DOWN) {
@@ -215,16 +214,17 @@ static void mpc52xx_fec_adjust_link(struct net_device *dev)
static int mpc52xx_fec_open(struct net_device *dev)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = NULL;
int err = -EBUSY;
if (priv->phy_node) {
- priv->phydev = of_phy_connect(priv->ndev, priv->phy_node,
- mpc52xx_fec_adjust_link, 0, 0);
- if (!priv->phydev) {
+ phydev = of_phy_connect(priv->ndev, priv->phy_node,
+ mpc52xx_fec_adjust_link, 0, 0);
+ if (!phydev) {
dev_err(&dev->dev, "of_phy_connect failed\n");
return -ENODEV;
}
- phy_start(priv->phydev);
+ phy_start(phydev);
}
if (request_irq(dev->irq, mpc52xx_fec_interrupt, IRQF_SHARED,
@@ -268,10 +268,9 @@ static int mpc52xx_fec_open(struct net_device *dev)
free_ctrl_irq:
free_irq(dev->irq, dev);
free_phy:
- if (priv->phydev) {
- phy_stop(priv->phydev);
- phy_disconnect(priv->phydev);
- priv->phydev = NULL;
+ if (phydev) {
+ phy_stop(phydev);
+ phy_disconnect(phydev);
}
return err;
@@ -280,6 +279,7 @@ static int mpc52xx_fec_open(struct net_device *dev)
static int mpc52xx_fec_close(struct net_device *dev)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = dev->phydev;
netif_stop_queue(dev);
@@ -291,11 +291,10 @@ static int mpc52xx_fec_close(struct net_device *dev)
free_irq(priv->r_irq, dev);
free_irq(priv->t_irq, dev);
- if (priv->phydev) {
+ if (phydev) {
/* power down phy */
- phy_stop(priv->phydev);
- phy_disconnect(priv->phydev);
- priv->phydev = NULL;
+ phy_stop(phydev);
+ phy_disconnect(phydev);
}
return 0;
@@ -763,26 +762,6 @@ static void mpc52xx_fec_reset(struct net_device *dev)
/* ethtool interface */
-static int mpc52xx_fec_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct mpc52xx_fec_priv *priv = netdev_priv(dev);
-
- if (!priv->phydev)
- return -ENODEV;
-
- return phy_ethtool_gset(priv->phydev, cmd);
-}
-
-static int mpc52xx_fec_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct mpc52xx_fec_priv *priv = netdev_priv(dev);
-
- if (!priv->phydev)
- return -ENODEV;
-
- return phy_ethtool_sset(priv->phydev, cmd);
-}
-
static u32 mpc52xx_fec_get_msglevel(struct net_device *dev)
{
struct mpc52xx_fec_priv *priv = netdev_priv(dev);
@@ -796,23 +775,23 @@ static void mpc52xx_fec_set_msglevel(struct net_device *dev, u32 level)
}
static const struct ethtool_ops mpc52xx_fec_ethtool_ops = {
- .get_settings = mpc52xx_fec_get_settings,
- .set_settings = mpc52xx_fec_set_settings,
.get_link = ethtool_op_get_link,
.get_msglevel = mpc52xx_fec_get_msglevel,
.set_msglevel = mpc52xx_fec_set_msglevel,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static int mpc52xx_fec_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct mpc52xx_fec_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = dev->phydev;
- if (!priv->phydev)
+ if (!phydev)
return -ENOTSUPP;
- return phy_mii_ioctl(priv->phydev, rq, cmd);
+ return phy_mii_ioctl(phydev, rq, cmd);
}
static const struct net_device_ops mpc52xx_fec_netdev_ops = {
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index ea83712a6d62..1de2e1e51c2b 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -615,7 +615,7 @@ struct fman {
struct fman_cfg *cfg;
struct muram_info *muram;
/* cam section in muram */
- int cam_offset;
+ unsigned long cam_offset;
size_t cam_size;
/* Fifo in MURAM */
int fifo_offset;
@@ -2772,7 +2772,7 @@ static struct fman *read_dts_node(struct platform_device *of_dev)
/* Get the FM address */
res = platform_get_resource(of_dev, IORESOURCE_MEM, 0);
if (!res) {
- dev_err(&of_dev->dev, "%s: Can't get FMan memory resouce\n",
+ dev_err(&of_dev->dev, "%s: Can't get FMan memory resource\n",
__func__);
goto fman_node_put;
}
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.c b/drivers/net/ethernet/freescale/fman/fman_muram.c
index 4eb0e9ac7182..47394c45b6e8 100644
--- a/drivers/net/ethernet/freescale/fman/fman_muram.c
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.c
@@ -129,7 +129,7 @@ unsigned long fman_muram_offset_to_vbase(struct muram_info *muram,
*
* Return: address of the allocated memory; NULL otherwise.
*/
-int fman_muram_alloc(struct muram_info *muram, size_t size)
+unsigned long fman_muram_alloc(struct muram_info *muram, size_t size)
{
unsigned long vaddr;
@@ -150,7 +150,7 @@ int fman_muram_alloc(struct muram_info *muram, size_t size)
*
* Free an allocated memory from FM-MURAM partition.
*/
-void fman_muram_free_mem(struct muram_info *muram, u32 offset, size_t size)
+void fman_muram_free_mem(struct muram_info *muram, unsigned long offset, size_t size)
{
unsigned long addr = fman_muram_offset_to_vbase(muram, offset);
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.h b/drivers/net/ethernet/freescale/fman/fman_muram.h
index dbf0af9e5bb5..889649ad8931 100644
--- a/drivers/net/ethernet/freescale/fman/fman_muram.h
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.h
@@ -44,8 +44,8 @@ struct muram_info *fman_muram_init(phys_addr_t base, size_t size);
unsigned long fman_muram_offset_to_vbase(struct muram_info *muram,
unsigned long offset);
-int fman_muram_alloc(struct muram_info *muram, size_t size);
+unsigned long fman_muram_alloc(struct muram_info *muram, size_t size);
-void fman_muram_free_mem(struct muram_info *muram, u32 offset, size_t size);
+void fman_muram_free_mem(struct muram_info *muram, unsigned long offset, size_t size);
#endif /* __FM_MURAM_EXT */
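The widening from int/u32 to unsigned long matters because the MURAM offsets are derived from allocator addresses (note the unsigned long vaddr inside fman_muram_alloc() above); storing such a value in a narrower signed type can truncate it on 64-bit systems, and comparisons against it then misbehave. A sketch of the class of bug avoided, assuming a genalloc-backed pool (values illustrative):

	unsigned long vaddr = gen_pool_alloc(pool, size); /* full-width value */
	int bad_off = vaddr - base;		/* may truncate on 64-bit */
	unsigned long off = vaddr - base;	/* preserves the full width */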
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 48a9c176e0d1..61fd486c50bb 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -652,13 +652,13 @@ static void fs_timeout(struct net_device *dev)
spin_lock_irqsave(&fep->lock, flags);
if (dev->flags & IFF_UP) {
- phy_stop(fep->phydev);
+ phy_stop(dev->phydev);
(*fep->ops->stop)(dev);
(*fep->ops->restart)(dev);
- phy_start(fep->phydev);
+ phy_start(dev->phydev);
}
- phy_start(fep->phydev);
+ phy_start(dev->phydev);
wake = fep->tx_free && !(CBDR_SC(fep->cur_tx) & BD_ENET_TX_READY);
spin_unlock_irqrestore(&fep->lock, flags);
@@ -672,7 +672,7 @@ static void fs_timeout(struct net_device *dev)
static void generic_adjust_link(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
- struct phy_device *phydev = fep->phydev;
+ struct phy_device *phydev = dev->phydev;
int new_state = 0;
if (phydev->link) {
@@ -741,8 +741,6 @@ static int fs_init_phy(struct net_device *dev)
return -ENODEV;
}
- fep->phydev = phydev;
-
return 0;
}
@@ -776,7 +774,7 @@ static int fs_enet_open(struct net_device *dev)
napi_disable(&fep->napi_tx);
return err;
}
- phy_start(fep->phydev);
+ phy_start(dev->phydev);
netif_start_queue(dev);
@@ -792,7 +790,7 @@ static int fs_enet_close(struct net_device *dev)
netif_carrier_off(dev);
napi_disable(&fep->napi);
napi_disable(&fep->napi_tx);
- phy_stop(fep->phydev);
+ phy_stop(dev->phydev);
spin_lock_irqsave(&fep->lock, flags);
spin_lock(&fep->tx_lock);
@@ -801,8 +799,7 @@ static int fs_enet_close(struct net_device *dev)
spin_unlock_irqrestore(&fep->lock, flags);
/* release any irqs */
- phy_disconnect(fep->phydev);
- fep->phydev = NULL;
+ phy_disconnect(dev->phydev);
free_irq(fep->interrupt, dev);
return 0;
@@ -847,26 +844,6 @@ static void fs_get_regs(struct net_device *dev, struct ethtool_regs *regs,
regs->version = 0;
}
-static int fs_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
-
- if (!fep->phydev)
- return -ENODEV;
-
- return phy_ethtool_gset(fep->phydev, cmd);
-}
-
-static int fs_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct fs_enet_private *fep = netdev_priv(dev);
-
- if (!fep->phydev)
- return -ENODEV;
-
- return phy_ethtool_sset(fep->phydev, cmd);
-}
-
static int fs_nway_reset(struct net_device *dev)
{
return 0;
@@ -887,24 +864,22 @@ static void fs_set_msglevel(struct net_device *dev, u32 value)
static const struct ethtool_ops fs_ethtool_ops = {
.get_drvinfo = fs_get_drvinfo,
.get_regs_len = fs_get_regs_len,
- .get_settings = fs_get_settings,
- .set_settings = fs_set_settings,
.nway_reset = fs_nway_reset,
.get_link = ethtool_op_get_link,
.get_msglevel = fs_get_msglevel,
.set_msglevel = fs_set_msglevel,
.get_regs = fs_get_regs,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static int fs_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct fs_enet_private *fep = netdev_priv(dev);
-
if (!netif_running(dev))
return -EINVAL;
- return phy_mii_ioctl(fep->phydev, rq, cmd);
+ return phy_mii_ioctl(dev->phydev, rq, cmd);
}
extern int fs_mii_connect(struct net_device *dev);
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
index f184d8f952e2..e29f54a35210 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet.h
@@ -149,7 +149,6 @@ struct fs_enet_private {
unsigned int last_mii_status;
int interrupt;
- struct phy_device *phydev;
int oldduplex, oldspeed, oldlink; /* current settings */
/* event masks */
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
index 1ba359f17ec6..d71761a34022 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fcc.c
@@ -370,7 +370,7 @@ static void restart(struct net_device *dev)
/* adjust to speed (for RMII mode) */
if (fpi->use_rmii) {
- if (fep->phydev->speed == 100)
+ if (dev->phydev->speed == 100)
C8(fcccp, fcc_gfemr, 0x20);
else
S8(fcccp, fcc_gfemr, 0x20);
@@ -396,7 +396,7 @@ static void restart(struct net_device *dev)
S32(fccp, fcc_fpsmr, FCC_PSMR_RMII);
/* adjust to duplex mode */
- if (fep->phydev->duplex)
+ if (dev->phydev->duplex)
S32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
else
C32(fccp, fcc_fpsmr, FCC_PSMR_FDE | FCC_PSMR_LPB);
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index bade2f8f9b5c..35a318ed3a62 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -254,7 +254,7 @@ static void restart(struct net_device *dev)
int r;
u32 addrhi, addrlo;
- struct mii_bus *mii = fep->phydev->mdio.bus;
+ struct mii_bus *mii = dev->phydev->mdio.bus;
struct fec_info* fec_inf = mii->priv;
r = whack_reset(fep->fec.fecp);
@@ -333,7 +333,7 @@ static void restart(struct net_device *dev)
/*
* adjust to duplex mode
*/
- if (fep->phydev->duplex) {
+ if (dev->phydev->duplex) {
FC(fecp, r_cntrl, FEC_RCNTRL_DRT);
FS(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD enable */
} else {
@@ -363,7 +363,7 @@ static void stop(struct net_device *dev)
const struct fs_platform_info *fpi = fep->fpi;
struct fec __iomem *fecp = fep->fec.fecp;
- struct fec_info *feci = fep->phydev->mdio.bus->priv;
+ struct fec_info *feci = dev->phydev->mdio.bus->priv;
int i;
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
index 7a184e8816a4..e8b9c33d35b4 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
@@ -352,7 +352,7 @@ static void restart(struct net_device *dev)
W16(sccp, scc_psmr, SCC_PSMR_ENCRC | SCC_PSMR_NIB22);
/* Set full duplex mode if needed */
- if (fep->phydev->duplex)
+ if (dev->phydev->duplex)
S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);
/* Restore multicast and promiscuous settings */
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index d2f917af539f..4b4f5bc0e279 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -999,7 +999,7 @@ static int gfar_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct gfar_private *priv = netdev_priv(dev);
+ struct phy_device *phydev = dev->phydev;
if (!netif_running(dev))
return -EINVAL;
@@ -1009,10 +1009,10 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (cmd == SIOCGHWTSTAMP)
return gfar_hwtstamp_get(dev, rq);
- if (!priv->phydev)
+ if (!phydev)
return -ENODEV;
- return phy_mii_ioctl(priv->phydev, rq, cmd);
+ return phy_mii_ioctl(phydev, rq, cmd);
}
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
@@ -1635,7 +1635,7 @@ static int gfar_suspend(struct device *dev)
gfar_start_wol_filer(priv);
} else {
- phy_stop(priv->phydev);
+ phy_stop(ndev->phydev);
}
return 0;
@@ -1664,7 +1664,7 @@ static int gfar_resume(struct device *dev)
gfar_filer_restore_table(priv);
} else {
- phy_start(priv->phydev);
+ phy_start(ndev->phydev);
}
gfar_start(priv);
@@ -1698,8 +1698,8 @@ static int gfar_restore(struct device *dev)
priv->oldspeed = 0;
priv->oldduplex = -1;
- if (priv->phydev)
- phy_start(priv->phydev);
+ if (ndev->phydev)
+ phy_start(ndev->phydev);
netif_device_attach(ndev);
enable_napi(priv);
@@ -1778,6 +1778,7 @@ static int init_phy(struct net_device *dev)
priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ?
GFAR_SUPPORTED_GBIT : 0;
phy_interface_t interface;
+ struct phy_device *phydev;
priv->oldlink = 0;
priv->oldspeed = 0;
@@ -1785,9 +1786,9 @@ static int init_phy(struct net_device *dev)
interface = gfar_get_interface(dev);
- priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
- interface);
- if (!priv->phydev) {
+ phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
+ interface);
+ if (!phydev) {
dev_err(&dev->dev, "could not attach to PHY\n");
return -ENODEV;
}
@@ -1796,11 +1797,11 @@ static int init_phy(struct net_device *dev)
gfar_configure_serdes(dev);
/* Remove any features not supported by the controller */
- priv->phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
- priv->phydev->advertising = priv->phydev->supported;
+ phydev->supported &= (GFAR_SUPPORTED | gigabit_support);
+ phydev->advertising = phydev->supported;
/* Add support for flow control, but don't advertise it by default */
- priv->phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
+ phydev->supported |= (SUPPORTED_Pause | SUPPORTED_Asym_Pause);
return 0;
}
@@ -1944,7 +1945,7 @@ void stop_gfar(struct net_device *dev)
/* disable ints and gracefully shut down Rx/Tx DMA */
gfar_halt(priv);
- phy_stop(priv->phydev);
+ phy_stop(dev->phydev);
free_skb_resources(priv);
}
@@ -2076,7 +2077,7 @@ void gfar_start(struct gfar_private *priv)
gfar_ints_enable(priv);
- priv->ndev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(priv->ndev); /* prevent tx timeout */
}
static void free_grp_irqs(struct gfar_priv_grp *grp)
@@ -2204,7 +2205,7 @@ int startup_gfar(struct net_device *ndev)
priv->oldspeed = 0;
priv->oldduplex = -1;
- phy_start(priv->phydev);
+ phy_start(ndev->phydev);
enable_napi(priv);
@@ -2274,7 +2275,7 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
fcb->flags = flags;
}
-void inline gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
+static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
fcb->flags |= TXFCB_VLN;
fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
@@ -2439,7 +2440,8 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_queue->tx_ring_size);
if (likely(!nr_frags)) {
- lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
+ if (likely(!do_tstamp))
+ lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
} else {
u32 lstatus_start = lstatus;
@@ -2572,8 +2574,7 @@ static int gfar_close(struct net_device *dev)
stop_gfar(dev);
/* Disconnect from the PHY */
- phy_disconnect(priv->phydev);
- priv->phydev = NULL;
+ phy_disconnect(dev->phydev);
gfar_free_irq(priv);
@@ -2921,17 +2922,25 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
{
unsigned int size = lstatus & BD_LENGTH_MASK;
struct page *page = rxb->page;
+ bool last = !!(lstatus & BD_LFLAG(RXBD_LAST));
/* Remove the FCS from the packet length */
- if (likely(lstatus & BD_LFLAG(RXBD_LAST)))
+ if (last)
size -= ETH_FCS_LEN;
- if (likely(first))
+ if (likely(first)) {
skb_put(skb, size);
- else
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- rxb->page_offset + RXBUF_ALIGNMENT,
- size, GFAR_RXB_TRUESIZE);
+ } else {
+ /* the last fragment's length field contains the full frame length */
+ if (last)
+ size -= skb->len;
+
+ /* in case the last fragment consisted only of the FCS */
+ if (size > 0)
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ rxb->page_offset + RXBUF_ALIGNMENT,
+ size, GFAR_RXB_TRUESIZE);
+ }
/* try reuse page */
if (unlikely(page_count(page) != 1))
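In multi-buffer reception only the last BD's length field carries the running total, i.e. the full frame length, so the final fragment's own size is recovered by subtracting what the skb has already accumulated; and when the frame ends right at a buffer boundary, the last fragment holds nothing but the FCS, the subtraction yields zero, and the fragment is skipped. Worked numbers, assuming 2048-byte buffers (GFAR_RXB_TRUESIZE):

	/*
	 * 3000 data bytes + 4-byte FCS arrive in two buffers:
	 *   first BD: length = 2048           -> skb_put(skb, 2048)
	 *   last BD:  length = 3004 (total) - 4 (FCS) = 3000
	 *             3000 - skb->len (2048)  -> 952-byte fragment added
	 * 2048 data bytes + FCS:
	 *   last BD:  2052 - 4 - 2048 = 0     -> fragment skipped
	 */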
@@ -3379,7 +3388,7 @@ static irqreturn_t gfar_interrupt(int irq, void *grp_id)
static void adjust_link(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
+ struct phy_device *phydev = dev->phydev;
if (unlikely(phydev->link != priv->oldlink ||
(phydev->link && (phydev->duplex != priv->oldduplex ||
@@ -3620,7 +3629,8 @@ static irqreturn_t gfar_error(int irq, void *grp_id)
static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
{
- struct phy_device *phydev = priv->phydev;
+ struct net_device *ndev = priv->ndev;
+ struct phy_device *phydev = ndev->phydev;
u32 val = 0;
if (!phydev->duplex)
@@ -3660,7 +3670,8 @@ static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
static noinline void gfar_update_link_state(struct gfar_private *priv)
{
struct gfar __iomem *regs = priv->gfargrp[0].regs;
- struct phy_device *phydev = priv->phydev;
+ struct net_device *ndev = priv->ndev;
+ struct phy_device *phydev = ndev->phydev;
struct gfar_priv_rx_q *rx_queue = NULL;
int i;
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index cb77667971a7..6e8a9c8467b9 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -100,7 +100,8 @@ extern const char gfar_driver_version[];
#define DEFAULT_RX_LFC_THR 16
#define DEFAULT_LFC_PTVVAL 4
-#define GFAR_RXB_SIZE 1536
+/* prevent fragmentation by HW in DSA environments */
+#define GFAR_RXB_SIZE roundup(1536 + 8, 64)
#define GFAR_SKBFRAG_SIZE (RXBUF_ALIGNMENT + GFAR_RXB_SIZE \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define GFAR_RXB_TRUESIZE 2048
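The new size reserves headroom for an 8-byte switch tag and then rounds up to the controller's 64-byte granularity, so DSA-tagged frames no longer get split across buffers. The arithmetic:

	roundup(1536 + 8, 64) = roundup(1544, 64) = 25 * 64 = 1600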
@@ -1153,7 +1154,6 @@ struct gfar_private {
phy_interface_t interface;
struct device_node *phy_node;
struct device_node *tbi_node;
- struct phy_device *phydev;
struct mii_bus *mii_bus;
int oldspeed;
int oldduplex;
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 4b0ee855edd7..56588f2e1d91 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -184,40 +184,6 @@ static void gfar_gdrvinfo(struct net_device *dev,
strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
}
-
-static int gfar_ssettings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct gfar_private *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
-
- if (NULL == phydev)
- return -ENODEV;
-
- return phy_ethtool_sset(phydev, cmd);
-}
-
-
-/* Return the current settings in the ethtool_cmd structure */
-static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct gfar_private *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
- struct gfar_priv_rx_q *rx_queue = NULL;
- struct gfar_priv_tx_q *tx_queue = NULL;
-
- if (NULL == phydev)
- return -ENODEV;
- tx_queue = priv->tx_queue[0];
- rx_queue = priv->rx_queue[0];
-
- /* etsec-1.7 and older versions have only one txic
- * and rxic regs although they support multiple queues */
- cmd->maxtxpkt = get_icft_value(tx_queue->txic);
- cmd->maxrxpkt = get_icft_value(rx_queue->rxic);
-
- return phy_ethtool_gset(phydev, cmd);
-}
-
/* Return the length of the register structure */
static int gfar_reglen(struct net_device *dev)
{
@@ -242,10 +208,12 @@ static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs,
static unsigned int gfar_usecs2ticks(struct gfar_private *priv,
unsigned int usecs)
{
+ struct net_device *ndev = priv->ndev;
+ struct phy_device *phydev = ndev->phydev;
unsigned int count;
/* The timer is different, depending on the interface speed */
- switch (priv->phydev->speed) {
+ switch (phydev->speed) {
case SPEED_1000:
count = GFAR_GBIT_TIME;
break;
@@ -267,10 +235,12 @@ static unsigned int gfar_usecs2ticks(struct gfar_private *priv,
static unsigned int gfar_ticks2usecs(struct gfar_private *priv,
unsigned int ticks)
{
+ struct net_device *ndev = priv->ndev;
+ struct phy_device *phydev = ndev->phydev;
unsigned int count;
/* The timer is different, depending on the interface speed */
- switch (priv->phydev->speed) {
+ switch (phydev->speed) {
case SPEED_1000:
count = GFAR_GBIT_TIME;
break;
@@ -304,7 +274,7 @@ static int gfar_gcoalesce(struct net_device *dev,
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
return -EOPNOTSUPP;
- if (NULL == priv->phydev)
+ if (!dev->phydev)
return -ENODEV;
rx_queue = priv->rx_queue[0];
@@ -365,7 +335,7 @@ static int gfar_scoalesce(struct net_device *dev,
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
return -EOPNOTSUPP;
- if (NULL == priv->phydev)
+ if (!dev->phydev)
return -ENODEV;
/* Check the bounds of the values */
@@ -529,7 +499,7 @@ static int gfar_spauseparam(struct net_device *dev,
struct ethtool_pauseparam *epause)
{
struct gfar_private *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
+ struct phy_device *phydev = dev->phydev;
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 oldadv, newadv;
@@ -1565,8 +1535,6 @@ static int gfar_get_ts_info(struct net_device *dev,
}
const struct ethtool_ops gfar_ethtool_ops = {
- .get_settings = gfar_gsettings,
- .set_settings = gfar_ssettings,
.get_drvinfo = gfar_gdrvinfo,
.get_regs_len = gfar_reglen,
.get_regs = gfar_get_regs,
@@ -1589,4 +1557,6 @@ const struct ethtool_ops gfar_ethtool_ops = {
.set_rxnfc = gfar_set_nfc,
.get_rxnfc = gfar_get_nfc,
.get_ts_info = gfar_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index 89714f5e0dfc..812a968a78e9 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -105,23 +105,20 @@ static const char rx_fw_stat_gstrings[][ETH_GSTRING_LEN] = {
#define UEC_RX_FW_STATS_LEN ARRAY_SIZE(rx_fw_stat_gstrings)
static int
-uec_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+uec_get_ksettings(struct net_device *netdev, struct ethtool_link_ksettings *cmd)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
struct phy_device *phydev = ugeth->phydev;
- struct ucc_geth_info *ug_info = ugeth->ug_info;
if (!phydev)
return -ENODEV;
- ecmd->maxtxpkt = 1;
- ecmd->maxrxpkt = ug_info->interruptcoalescingmaxvalue[0];
-
- return phy_ethtool_gset(phydev, ecmd);
+ return phy_ethtool_ksettings_get(phydev, cmd);
}
static int
-uec_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+uec_set_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct ucc_geth_private *ugeth = netdev_priv(netdev);
struct phy_device *phydev = ugeth->phydev;
@@ -129,7 +126,7 @@ uec_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
if (!phydev)
return -ENODEV;
- return phy_ethtool_sset(phydev, ecmd);
+ return phy_ethtool_ksettings_set(phydev, cmd);
}
static void
@@ -392,8 +389,6 @@ static int uec_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
#endif /* CONFIG_PM */
static const struct ethtool_ops uec_ethtool_ops = {
- .get_settings = uec_get_settings,
- .set_settings = uec_set_settings,
.get_drvinfo = uec_get_drvinfo,
.get_regs_len = uec_get_regs_len,
.get_regs = uec_get_regs,
@@ -411,6 +406,8 @@ static const struct ethtool_ops uec_ethtool_ops = {
.get_wol = uec_get_wol,
.set_wol = uec_set_wol,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = uec_get_ksettings,
+ .set_link_ksettings = uec_set_ksettings,
};
void uec_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index 678f5018d0be..399cfd217288 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -746,7 +746,7 @@ static irqreturn_t fjn_interrupt(int dummy, void *dev_id)
lp->sent = lp->tx_queue ;
lp->tx_queue = 0;
lp->tx_queue_len = 0;
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
} else {
lp->tx_started = 0;
}
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index 4ccc032633c4..d11287e11371 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_HISILICON
bool "Hisilicon devices"
default y
- depends on OF && HAS_DMA
+ depends on (OF || ACPI) && HAS_DMA
depends on ARM || ARM64 || COMPILE_TEST
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -23,6 +23,18 @@ config HIX5HD2_GMAC
help
This selects the hix5hd2 mac family network device.
+config HISI_FEMAC
+ tristate "Hisilicon Fast Ethernet MAC device support"
+ depends on HAS_IOMEM
+ select PHYLIB
+ select RESET_CONTROLLER
+ help
+ This selects the Hisilicon Fast Ethernet MAC device (FEMAC).
+ The FEMAC receives and transmits data over Ethernet
+ ports at 10/100 Mbps in full-duplex or half-duplex mode.
+ The FEMAC exchanges data with the CPU and supports
+ Energy Efficient Ethernet (EEE).
+
config HIP04_ETH
tristate "HISILICON P04 Ethernet support"
depends on HAS_IOMEM # For MFD_SYSCON
diff --git a/drivers/net/ethernet/hisilicon/Makefile b/drivers/net/ethernet/hisilicon/Makefile
index 390b71fb3000..8661695024dc 100644
--- a/drivers/net/ethernet/hisilicon/Makefile
+++ b/drivers/net/ethernet/hisilicon/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_HIX5HD2_GMAC) += hix5hd2_gmac.o
obj-$(CONFIG_HIP04_ETH) += hip04_eth.o
obj-$(CONFIG_HNS_MDIO) += hns_mdio.o
obj-$(CONFIG_HNS) += hns/
+obj-$(CONFIG_HISI_FEMAC) += hisi_femac.o
diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
new file mode 100644
index 000000000000..b5d7ad0252a0
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
@@ -0,0 +1,1007 @@
+/*
+ * Hisilicon Fast Ethernet MAC Driver
+ *
+ * Copyright (c) 2016 HiSilicon Technologies Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/circ_buf.h>
+#include <linux/clk.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+/* MAC control register list */
+#define MAC_PORTSEL 0x0200
+#define MAC_PORTSEL_STAT_CPU BIT(0)
+#define MAC_PORTSEL_RMII BIT(1)
+#define MAC_PORTSET 0x0208
+#define MAC_PORTSET_DUPLEX_FULL BIT(0)
+#define MAC_PORTSET_LINKED BIT(1)
+#define MAC_PORTSET_SPEED_100M BIT(2)
+#define MAC_SET 0x0210
+#define MAX_FRAME_SIZE 1600
+#define MAX_FRAME_SIZE_MASK GENMASK(10, 0)
+#define BIT_PAUSE_EN BIT(18)
+#define RX_COALESCE_SET 0x0340
+#define RX_COALESCED_FRAME_OFFSET 24
+#define RX_COALESCED_FRAMES 8
+#define RX_COALESCED_TIMER 0x74
+#define QLEN_SET 0x0344
+#define RX_DEPTH_OFFSET 8
+#define MAX_HW_FIFO_DEPTH 64
+#define HW_TX_FIFO_DEPTH 12
+#define HW_RX_FIFO_DEPTH (MAX_HW_FIFO_DEPTH - HW_TX_FIFO_DEPTH)
+#define IQFRM_DES 0x0354
+#define RX_FRAME_LEN_MASK GENMASK(11, 0)
+#define IQ_ADDR 0x0358
+#define EQ_ADDR 0x0360
+#define EQFRM_LEN 0x0364
+#define ADDRQ_STAT 0x036C
+#define TX_CNT_INUSE_MASK GENMASK(5, 0)
+#define BIT_TX_READY BIT(24)
+#define BIT_RX_READY BIT(25)
+/* global control register list */
+#define GLB_HOSTMAC_L32 0x0000
+#define GLB_HOSTMAC_H16 0x0004
+#define GLB_SOFT_RESET 0x0008
+#define SOFT_RESET_ALL BIT(0)
+#define GLB_FWCTRL 0x0010
+#define FWCTRL_VLAN_ENABLE BIT(0)
+#define FWCTRL_FW2CPU_ENA BIT(5)
+#define FWCTRL_FWALL2CPU BIT(7)
+#define GLB_MACTCTRL 0x0014
+#define MACTCTRL_UNI2CPU BIT(1)
+#define MACTCTRL_MULTI2CPU BIT(3)
+#define MACTCTRL_BROAD2CPU BIT(5)
+#define MACTCTRL_MACT_ENA BIT(7)
+#define GLB_IRQ_STAT 0x0030
+#define GLB_IRQ_ENA 0x0034
+#define IRQ_ENA_PORT0_MASK GENMASK(7, 0)
+#define IRQ_ENA_PORT0 BIT(18)
+#define IRQ_ENA_ALL BIT(19)
+#define GLB_IRQ_RAW 0x0038
+#define IRQ_INT_RX_RDY BIT(0)
+#define IRQ_INT_TX_PER_PACKET BIT(1)
+#define IRQ_INT_TX_FIFO_EMPTY BIT(6)
+#define IRQ_INT_MULTI_RXRDY BIT(7)
+#define DEF_INT_MASK (IRQ_INT_MULTI_RXRDY | \
+ IRQ_INT_TX_PER_PACKET | \
+ IRQ_INT_TX_FIFO_EMPTY)
+#define GLB_MAC_L32_BASE 0x0100
+#define GLB_MAC_H16_BASE 0x0104
+#define MACFLT_HI16_MASK GENMASK(15, 0)
+#define BIT_MACFLT_ENA BIT(17)
+#define BIT_MACFLT_FW2CPU BIT(21)
+#define GLB_MAC_H16(reg) (GLB_MAC_H16_BASE + ((reg) * 0x8))
+#define GLB_MAC_L32(reg) (GLB_MAC_L32_BASE + ((reg) * 0x8))
+#define MAX_MAC_FILTER_NUM 8
+#define MAX_UNICAST_ADDRESSES 2
+#define MAX_MULTICAST_ADDRESSES (MAX_MAC_FILTER_NUM - \
+ MAX_UNICAST_ADDRESSES)
+/* software Tx and Rx queue sizes; must be powers of 2 */
+#define TXQ_NUM 64
+#define RXQ_NUM 128
+#define FEMAC_POLL_WEIGHT 16
+
+#define PHY_RESET_DELAYS_PROPERTY "hisilicon,phy-reset-delays-us"
+
+enum phy_reset_delays {
+ PRE_DELAY,
+ PULSE,
+ POST_DELAY,
+ DELAYS_NUM,
+};
+
+struct hisi_femac_queue {
+ struct sk_buff **skb;
+ dma_addr_t *dma_phys;
+ int num;
+ unsigned int head;
+ unsigned int tail;
+};
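The queues are classic circ_buf rings: the producer advances head, the consumer advances tail, and CIRC_SPACE()/CIRC_CNT() from <linux/circ_buf.h> report free and used slots. Those macros mask with size - 1, so they are only exact for power-of-2 sizes, which TXQ_NUM (64) and RXQ_NUM (128) satisfy. A minimal producer-side sketch matching the driver's usage (q and entry are placeholders):

	/* one slot stays empty so a full ring is distinguishable from empty */
	if (!CIRC_SPACE(q->head, q->tail, q->num))
		return -EBUSY;

	q->skb[q->head] = entry;		/* publish the entry ...  */
	q->head = (q->head + 1) % q->num;	/* ... then advance head  */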
+
+struct hisi_femac_priv {
+ void __iomem *port_base;
+ void __iomem *glb_base;
+ struct clk *clk;
+ struct reset_control *mac_rst;
+ struct reset_control *phy_rst;
+ u32 phy_reset_delays[DELAYS_NUM];
+ u32 link_status;
+
+ struct device *dev;
+ struct net_device *ndev;
+
+ struct hisi_femac_queue txq;
+ struct hisi_femac_queue rxq;
+ u32 tx_fifo_used_cnt;
+ struct napi_struct napi;
+};
+
+static void hisi_femac_irq_enable(struct hisi_femac_priv *priv, int irqs)
+{
+ u32 val;
+
+ val = readl(priv->glb_base + GLB_IRQ_ENA);
+ writel(val | irqs, priv->glb_base + GLB_IRQ_ENA);
+}
+
+static void hisi_femac_irq_disable(struct hisi_femac_priv *priv, int irqs)
+{
+ u32 val;
+
+ val = readl(priv->glb_base + GLB_IRQ_ENA);
+ writel(val & (~irqs), priv->glb_base + GLB_IRQ_ENA);
+}
+
+static void hisi_femac_tx_dma_unmap(struct hisi_femac_priv *priv,
+ struct sk_buff *skb, unsigned int pos)
+{
+ dma_addr_t dma_addr;
+
+ dma_addr = priv->txq.dma_phys[pos];
+ dma_unmap_single(priv->dev, dma_addr, skb->len, DMA_TO_DEVICE);
+}
+
+static void hisi_femac_xmit_reclaim(struct net_device *dev)
+{
+ struct sk_buff *skb;
+ struct hisi_femac_priv *priv = netdev_priv(dev);
+ struct hisi_femac_queue *txq = &priv->txq;
+ unsigned int bytes_compl = 0, pkts_compl = 0;
+ u32 val;
+
+ netif_tx_lock(dev);
+
+ val = readl(priv->port_base + ADDRQ_STAT) & TX_CNT_INUSE_MASK;
+ while (val < priv->tx_fifo_used_cnt) {
+ skb = txq->skb[txq->tail];
+ if (unlikely(!skb)) {
+ netdev_err(dev, "xmitq_cnt_inuse=%d, tx_fifo_used=%d\n",
+ val, priv->tx_fifo_used_cnt);
+ break;
+ }
+ hisi_femac_tx_dma_unmap(priv, skb, txq->tail);
+ pkts_compl++;
+ bytes_compl += skb->len;
+ dev_kfree_skb_any(skb);
+
+ priv->tx_fifo_used_cnt--;
+
+ val = readl(priv->port_base + ADDRQ_STAT) & TX_CNT_INUSE_MASK;
+ txq->skb[txq->tail] = NULL;
+ txq->tail = (txq->tail + 1) % txq->num;
+ }
+
+ netdev_completed_queue(dev, pkts_compl, bytes_compl);
+
+ if (unlikely(netif_queue_stopped(dev)) && pkts_compl)
+ netif_wake_queue(dev);
+
+ netif_tx_unlock(dev);
+}
+
+static void hisi_femac_adjust_link(struct net_device *dev)
+{
+ struct hisi_femac_priv *priv = netdev_priv(dev);
+ struct phy_device *phy = dev->phydev;
+ u32 status = 0;
+
+ if (phy->link)
+ status |= MAC_PORTSET_LINKED;
+ if (phy->duplex == DUPLEX_FULL)
+ status |= MAC_PORTSET_DUPLEX_FULL;
+ if (phy->speed == SPEED_100)
+ status |= MAC_PORTSET_SPEED_100M;
+
+ if ((status != priv->link_status) &&
+ ((status | priv->link_status) & MAC_PORTSET_LINKED)) {
+ writel(status, priv->port_base + MAC_PORTSET);
+ priv->link_status = status;
+ phy_print_status(phy);
+ }
+}
+
+static void hisi_femac_rx_refill(struct hisi_femac_priv *priv)
+{
+ struct hisi_femac_queue *rxq = &priv->rxq;
+ struct sk_buff *skb;
+ u32 pos;
+ u32 len = MAX_FRAME_SIZE;
+ dma_addr_t addr;
+
+ pos = rxq->head;
+ while (readl(priv->port_base + ADDRQ_STAT) & BIT_RX_READY) {
+ if (!CIRC_SPACE(pos, rxq->tail, rxq->num))
+ break;
+ if (unlikely(rxq->skb[pos])) {
+ netdev_err(priv->ndev, "err skb[%d]=%p\n",
+ pos, rxq->skb[pos]);
+ break;
+ }
+ skb = netdev_alloc_skb_ip_align(priv->ndev, len);
+ if (unlikely(!skb))
+ break;
+
+ addr = dma_map_single(priv->dev, skb->data, len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(priv->dev, addr)) {
+ dev_kfree_skb_any(skb);
+ break;
+ }
+ rxq->dma_phys[pos] = addr;
+ rxq->skb[pos] = skb;
+ writel(addr, priv->port_base + IQ_ADDR);
+ pos = (pos + 1) % rxq->num;
+ }
+ rxq->head = pos;
+}
+
+static int hisi_femac_rx(struct net_device *dev, int limit)
+{
+ struct hisi_femac_priv *priv = netdev_priv(dev);
+ struct hisi_femac_queue *rxq = &priv->rxq;
+ struct sk_buff *skb;
+ dma_addr_t addr;
+ u32 rx_pkt_info, pos, len, rx_pkts_num = 0;
+
+ pos = rxq->tail;
+ while (readl(priv->glb_base + GLB_IRQ_RAW) & IRQ_INT_RX_RDY) {
+ rx_pkt_info = readl(priv->port_base + IQFRM_DES);
+ len = rx_pkt_info & RX_FRAME_LEN_MASK;
+ len -= ETH_FCS_LEN;
+
+ /* tell hardware we will deal with this packet */
+ writel(IRQ_INT_RX_RDY, priv->glb_base + GLB_IRQ_RAW);
+
+ rx_pkts_num++;
+
+ skb = rxq->skb[pos];
+ if (unlikely(!skb)) {
+ netdev_err(dev, "rx skb NULL. pos=%d\n", pos);
+ break;
+ }
+ rxq->skb[pos] = NULL;
+
+ addr = rxq->dma_phys[pos];
+ dma_unmap_single(priv->dev, addr, MAX_FRAME_SIZE,
+ DMA_FROM_DEVICE);
+ skb_put(skb, len);
+ if (unlikely(skb->len > MAX_FRAME_SIZE)) {
+ netdev_err(dev, "rcv len err, len = %d\n", skb->len);
+ dev->stats.rx_errors++;
+ dev->stats.rx_length_errors++;
+ dev_kfree_skb_any(skb);
+ goto next;
+ }
+
+ skb->protocol = eth_type_trans(skb, dev);
+ napi_gro_receive(&priv->napi, skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += skb->len;
+next:
+ pos = (pos + 1) % rxq->num;
+ if (rx_pkts_num >= limit)
+ break;
+ }
+ rxq->tail = pos;
+
+ hisi_femac_rx_refill(priv);
+
+ return rx_pkts_num;
+}
+
+static int hisi_femac_poll(struct napi_struct *napi, int budget)
+{
+ struct hisi_femac_priv *priv = container_of(napi,
+ struct hisi_femac_priv, napi);
+ struct net_device *dev = priv->ndev;
+ int work_done = 0, task = budget;
+ int ints, num;
+
+ do {
+ hisi_femac_xmit_reclaim(dev);
+ num = hisi_femac_rx(dev, task);
+ work_done += num;
+ task -= num;
+ if (work_done >= budget)
+ break;
+
+ ints = readl(priv->glb_base + GLB_IRQ_RAW);
+ writel(ints & DEF_INT_MASK,
+ priv->glb_base + GLB_IRQ_RAW);
+ } while (ints & DEF_INT_MASK);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+ hisi_femac_irq_enable(priv, DEF_INT_MASK &
+ (~IRQ_INT_TX_PER_PACKET));
+ }
+
+ return work_done;
+}
+
+static irqreturn_t hisi_femac_interrupt(int irq, void *dev_id)
+{
+ int ints;
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct hisi_femac_priv *priv = netdev_priv(dev);
+
+ ints = readl(priv->glb_base + GLB_IRQ_RAW);
+
+ if (likely(ints & DEF_INT_MASK)) {
+ writel(ints & DEF_INT_MASK,
+ priv->glb_base + GLB_IRQ_RAW);
+ hisi_femac_irq_disable(priv, DEF_INT_MASK);
+ napi_schedule(&priv->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int hisi_femac_init_queue(struct device *dev,
+ struct hisi_femac_queue *queue,
+ unsigned int num)
+{
+ queue->skb = devm_kcalloc(dev, num, sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ if (!queue->skb)
+ return -ENOMEM;
+
+ queue->dma_phys = devm_kcalloc(dev, num, sizeof(dma_addr_t),
+ GFP_KERNEL);
+ if (!queue->dma_phys)
+ return -ENOMEM;
+
+ queue->num = num;
+ queue->head = 0;
+ queue->tail = 0;
+
+ return 0;
+}
+
+static int hisi_femac_init_tx_and_rx_queues(struct hisi_femac_priv *priv)
+{
+ int ret;
+
+ ret = hisi_femac_init_queue(priv->dev, &priv->txq, TXQ_NUM);
+ if (ret)
+ return ret;
+
+ ret = hisi_femac_init_queue(priv->dev, &priv->rxq, RXQ_NUM);
+ if (ret)
+ return ret;
+
+ priv->tx_fifo_used_cnt = 0;
+
+ return 0;
+}
+
+static void hisi_femac_free_skb_rings(struct hisi_femac_priv *priv)
+{
+ struct hisi_femac_queue *txq = &priv->txq;
+ struct hisi_femac_queue *rxq = &priv->rxq;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ u32 pos;
+
+ pos = rxq->tail;
+ while (pos != rxq->head) {
+ skb = rxq->skb[pos];
+ if (unlikely(!skb)) {
+ netdev_err(priv->ndev, "NULL rx skb. pos=%d, head=%d\n",
+ pos, rxq->head);
+ pos = (pos + 1) % rxq->num;
+ continue; /* skip the hole; a bare continue would spin forever */
+ }
+
+ dma_addr = rxq->dma_phys[pos];
+ dma_unmap_single(priv->dev, dma_addr, MAX_FRAME_SIZE,
+ DMA_FROM_DEVICE);
+
+ dev_kfree_skb_any(skb);
+ rxq->skb[pos] = NULL;
+ pos = (pos + 1) % rxq->num;
+ }
+ rxq->tail = pos;
+
+ pos = txq->tail;
+ while (pos != txq->head) {
+ skb = txq->skb[pos];
+ if (unlikely(!skb)) {
+ netdev_err(priv->ndev, "NULL tx skb. pos=%d, head=%d\n",
+ pos, txq->head);
+ pos = (pos + 1) % txq->num;
+ continue; /* skip the hole; a bare continue would spin forever */
+ }
+ hisi_femac_tx_dma_unmap(priv, skb, pos);
+ dev_kfree_skb_any(skb);
+ txq->skb[pos] = NULL;
+ pos = (pos + 1) % txq->num;
+ }
+ txq->tail = pos;
+ priv->tx_fifo_used_cnt = 0;
+}
+
+static int hisi_femac_set_hw_mac_addr(struct hisi_femac_priv *priv,
+ unsigned char *mac)
+{
+ u32 reg;
+
+ reg = mac[1] | (mac[0] << 8);
+ writel(reg, priv->glb_base + GLB_HOSTMAC_H16);
+
+ reg = mac[5] | (mac[4] << 8) | (mac[3] << 16) | (mac[2] << 24);
+ writel(reg, priv->glb_base + GLB_HOSTMAC_L32);
+
+ return 0;
+}
+
+static int hisi_femac_port_reset(struct hisi_femac_priv *priv)
+{
+ u32 val;
+
+ val = readl(priv->glb_base + GLB_SOFT_RESET);
+ val |= SOFT_RESET_ALL;
+ writel(val, priv->glb_base + GLB_SOFT_RESET);
+
+ usleep_range(500, 800);
+
+ val &= ~SOFT_RESET_ALL;
+ writel(val, priv->glb_base + GLB_SOFT_RESET);
+
+ return 0;
+}
+
+static int hisi_femac_net_open(struct net_device *dev)
+{
+ struct hisi_femac_priv *priv = netdev_priv(dev);
+
+ hisi_femac_port_reset(priv);
+ hisi_femac_set_hw_mac_addr(priv, dev->dev_addr);
+ hisi_femac_rx_refill(priv);
+
+ netif_carrier_off(dev);
+ netdev_reset_queue(dev);
+ netif_start_queue(dev);
+ napi_enable(&priv->napi);
+
+ priv->link_status = 0;
+ if (dev->phydev)
+ phy_start(dev->phydev);
+
+ writel(IRQ_ENA_PORT0_MASK, priv->glb_base + GLB_IRQ_RAW);
+ hisi_femac_irq_enable(priv, IRQ_ENA_ALL | IRQ_ENA_PORT0 | DEF_INT_MASK);
+
+ return 0;
+}
+
+static int hisi_femac_net_close(struct net_device *dev)
+{
+ struct hisi_femac_priv *priv = netdev_priv(dev);
+
+ hisi_femac_irq_disable(priv, IRQ_ENA_PORT0);
+
+ if (dev->phydev)
+ phy_stop(dev->phydev);
+
+ netif_stop_queue(dev);
+ napi_disable(&priv->napi);
+
+ hisi_femac_free_skb_rings(priv);
+
+ return 0;
+}
+
+static netdev_tx_t hisi_femac_net_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct hisi_femac_priv *priv = netdev_priv(dev);
+ struct hisi_femac_queue *txq = &priv->txq;
+ dma_addr_t addr;
+ u32 val;
+
+ val = readl(priv->port_base + ADDRQ_STAT);
+ val &= BIT_TX_READY;
+ if (!val) {
+ hisi_femac_irq_enable(priv, IRQ_INT_TX_PER_PACKET);
+ dev->stats.tx_dropped++;
+ dev->stats.tx_fifo_errors++;
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (unlikely(!CIRC_SPACE(txq->head, txq->tail,
+ txq->num))) {
+ hisi_femac_irq_enable(priv, IRQ_INT_TX_PER_PACKET);
+ dev->stats.tx_dropped++;
+ dev->stats.tx_fifo_errors++;
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
+
+ addr = dma_map_single(priv->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, addr))) {
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+ txq->dma_phys[txq->head] = addr;
+
+ txq->skb[txq->head] = skb;
+ txq->head = (txq->head + 1) % txq->num;
+
+ writel(addr, priv->port_base + EQ_ADDR);
+ writel(skb->len + ETH_FCS_LEN, priv->port_base + EQFRM_LEN);
+
+ priv->tx_fifo_used_cnt++;
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+ netdev_sent_queue(dev, skb->len);
+
+ return NETDEV_TX_OK;
+}
+
+static int hisi_femac_set_mac_address(struct net_device *dev, void *p)
+{
+ struct hisi_femac_priv *priv = netdev_priv(dev);
+ struct sockaddr *skaddr = p;
+
+ if (!is_valid_ether_addr(skaddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(dev->dev_addr, skaddr->sa_data, dev->addr_len);
+ dev->addr_assign_type &= ~NET_ADDR_RANDOM;
+
+ hisi_femac_set_hw_mac_addr(priv, dev->dev_addr);
+
+ return 0;
+}
+
+static void hisi_femac_enable_hw_addr_filter(struct hisi_femac_priv *priv,
+ unsigned int reg_n, bool enable)
+{
+ u32 val;
+
+ val = readl(priv->glb_base + GLB_MAC_H16(reg_n));
+ if (enable)
+ val |= BIT_MACFLT_ENA;
+ else
+ val &= ~BIT_MACFLT_ENA;
+ writel(val, priv->glb_base + GLB_MAC_H16(reg_n));
+}
+
+static void hisi_femac_set_hw_addr_filter(struct hisi_femac_priv *priv,
+ unsigned char *addr,
+ unsigned int reg_n)
+{
+ unsigned int high, low;
+ u32 val;
+
+ high = GLB_MAC_H16(reg_n);
+ low = GLB_MAC_L32(reg_n);
+
+ val = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
+ writel(val, priv->glb_base + low);
+
+ val = readl(priv->glb_base + high);
+ val &= ~MACFLT_HI16_MASK;
+ val |= ((addr[0] << 8) | addr[1]);
+ val |= (BIT_MACFLT_ENA | BIT_MACFLT_FW2CPU);
+ writel(val, priv->glb_base + high);
+}
+
+static void hisi_femac_set_promisc_mode(struct hisi_femac_priv *priv,
+ bool promisc_mode)
+{
+ u32 val;
+
+ val = readl(priv->glb_base + GLB_FWCTRL);
+ if (promisc_mode)
+ val |= FWCTRL_FWALL2CPU;
+ else
+ val &= ~FWCTRL_FWALL2CPU;
+ writel(val, priv->glb_base + GLB_FWCTRL);
+}
+
+/* Handle multiple multicast addresses (perfect filtering) */
+static void hisi_femac_set_mc_addr_filter(struct hisi_femac_priv *priv)
+{
+ struct net_device *dev = priv->ndev;
+ u32 val;
+
+ val = readl(priv->glb_base + GLB_MACTCTRL);
+ if ((netdev_mc_count(dev) > MAX_MULTICAST_ADDRESSES) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ val |= MACTCTRL_MULTI2CPU;
+ } else {
+ int reg = MAX_UNICAST_ADDRESSES;
+ int i;
+ struct netdev_hw_addr *ha;
+
+ for (i = reg; i < MAX_MAC_FILTER_NUM; i++)
+ hisi_femac_enable_hw_addr_filter(priv, i, false);
+
+ netdev_for_each_mc_addr(ha, dev) {
+ hisi_femac_set_hw_addr_filter(priv, ha->addr, reg);
+ reg++;
+ }
+ val &= ~MACTCTRL_MULTI2CPU;
+ }
+ writel(val, priv->glb_base + GLB_MACTCTRL);
+}
+
+/* Handle multiple unicast addresses (perfect filtering) */
+static void hisi_femac_set_uc_addr_filter(struct hisi_femac_priv *priv)
+{
+ struct net_device *dev = priv->ndev;
+ u32 val;
+
+ val = readl(priv->glb_base + GLB_MACTCTRL);
+ if (netdev_uc_count(dev) > MAX_UNICAST_ADDRESSES) {
+ val |= MACTCTRL_UNI2CPU;
+ } else {
+ int reg = 0;
+ int i;
+ struct netdev_hw_addr *ha;
+
+ for (i = reg; i < MAX_UNICAST_ADDRESSES; i++)
+ hisi_femac_enable_hw_addr_filter(priv, i, false);
+
+ netdev_for_each_uc_addr(ha, dev) {
+ hisi_femac_set_hw_addr_filter(priv, ha->addr, reg);
+ reg++;
+ }
+ val &= ~MACTCTRL_UNI2CPU;
+ }
+ writel(val, priv->glb_base + GLB_MACTCTRL);
+}
+
+static void hisi_femac_net_set_rx_mode(struct net_device *dev)
+{
+ struct hisi_femac_priv *priv = netdev_priv(dev);
+
+ if (dev->flags & IFF_PROMISC) {
+ hisi_femac_set_promisc_mode(priv, true);
+ } else {
+ hisi_femac_set_promisc_mode(priv, false);
+ hisi_femac_set_mc_addr_filter(priv);
+ hisi_femac_set_uc_addr_filter(priv);
+ }
+}
+
+static int hisi_femac_net_ioctl(struct net_device *dev,
+ struct ifreq *ifreq, int cmd)
+{
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (!dev->phydev)
+ return -EINVAL;
+
+ return phy_mii_ioctl(dev->phydev, ifreq, cmd);
+}
+
+static struct ethtool_ops hisi_femac_ethtools_ops = {
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
+
+static const struct net_device_ops hisi_femac_netdev_ops = {
+ .ndo_open = hisi_femac_net_open,
+ .ndo_stop = hisi_femac_net_close,
+ .ndo_start_xmit = hisi_femac_net_xmit,
+ .ndo_do_ioctl = hisi_femac_net_ioctl,
+ .ndo_set_mac_address = hisi_femac_set_mac_address,
+ .ndo_set_rx_mode = hisi_femac_net_set_rx_mode,
+ .ndo_change_mtu = eth_change_mtu,
+};
+
+static void hisi_femac_core_reset(struct hisi_femac_priv *priv)
+{
+ reset_control_assert(priv->mac_rst);
+ reset_control_deassert(priv->mac_rst);
+}
+
+static void hisi_femac_sleep_us(u32 time_us)
+{
+ u32 time_ms;
+
+ if (!time_us)
+ return;
+
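+ /* per kernel timer guidance: usleep_range() for sub-20ms delays,
+ * msleep() for longer ones
+ */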
+ time_ms = DIV_ROUND_UP(time_us, 1000);
+ if (time_ms < 20)
+ usleep_range(time_us, time_us + 500);
+ else
+ msleep(time_ms);
+}
+
+static void hisi_femac_phy_reset(struct hisi_femac_priv *priv)
+{
+ /* To make sure the PHY hardware reset succeeds, we must first put
+ * the PHY into the deasserted state and only then run the full
+ * hardware reset sequence
+ */
+ reset_control_deassert(priv->phy_rst);
+ hisi_femac_sleep_us(priv->phy_reset_delays[PRE_DELAY]);
+
+ reset_control_assert(priv->phy_rst);
+ /* delay long enough for the reset pulse to take effect;
+ * the required time depends on the PHY hardware
+ */
+ hisi_femac_sleep_us(priv->phy_reset_delays[PULSE]);
+ reset_control_deassert(priv->phy_rst);
+ /* delay until the PHY is ready for subsequent MDIO access */
+ hisi_femac_sleep_us(priv->phy_reset_delays[POST_DELAY]);
+}
+
+static void hisi_femac_port_init(struct hisi_femac_priv *priv)
+{
+ u32 val;
+
+ /* MAC gets link status info and phy mode by software config */
+ val = MAC_PORTSEL_STAT_CPU;
+ if (priv->ndev->phydev->interface == PHY_INTERFACE_MODE_RMII)
+ val |= MAC_PORTSEL_RMII;
+ writel(val, priv->port_base + MAC_PORTSEL);
+
+ /* clear all interrupt status */
+ writel(IRQ_ENA_PORT0_MASK, priv->glb_base + GLB_IRQ_RAW);
+ hisi_femac_irq_disable(priv, IRQ_ENA_PORT0_MASK | IRQ_ENA_PORT0);
+
+ val = readl(priv->glb_base + GLB_FWCTRL);
+ val &= ~(FWCTRL_VLAN_ENABLE | FWCTRL_FWALL2CPU);
+ val |= FWCTRL_FW2CPU_ENA;
+ writel(val, priv->glb_base + GLB_FWCTRL);
+
+ val = readl(priv->glb_base + GLB_MACTCTRL);
+ val |= (MACTCTRL_BROAD2CPU | MACTCTRL_MACT_ENA);
+ writel(val, priv->glb_base + GLB_MACTCTRL);
+
+ val = readl(priv->port_base + MAC_SET);
+ val &= ~MAX_FRAME_SIZE_MASK;
+ val |= MAX_FRAME_SIZE;
+ writel(val, priv->port_base + MAC_SET);
+
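+ /* RX interrupt coalescing: a timeout tick count plus a frame-count
+ * threshold (presumably whichever triggers first raises the IRQ)
+ */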
+ val = RX_COALESCED_TIMER |
+ (RX_COALESCED_FRAMES << RX_COALESCED_FRAME_OFFSET);
+ writel(val, priv->port_base + RX_COALESCE_SET);
+
+ val = (HW_RX_FIFO_DEPTH << RX_DEPTH_OFFSET) | HW_TX_FIFO_DEPTH;
+ writel(val, priv->port_base + QLEN_SET);
+}
+
+static int hisi_femac_drv_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct resource *res;
+ struct net_device *ndev;
+ struct hisi_femac_priv *priv;
+ struct phy_device *phy;
+ const char *mac_addr;
+ int ret;
+
+ ndev = alloc_etherdev(sizeof(*priv));
+ if (!ndev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ndev);
+
+ priv = netdev_priv(ndev);
+ priv->dev = dev;
+ priv->ndev = ndev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->port_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->port_base)) {
+ ret = PTR_ERR(priv->port_base);
+ goto out_free_netdev;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ priv->glb_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->glb_base)) {
+ ret = PTR_ERR(priv->glb_base);
+ goto out_free_netdev;
+ }
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "failed to get clk\n");
+ ret = -ENODEV;
+ goto out_free_netdev;
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clk %d\n", ret);
+ goto out_free_netdev;
+ }
+
+ priv->mac_rst = devm_reset_control_get(dev, "mac");
+ if (IS_ERR(priv->mac_rst)) {
+ ret = PTR_ERR(priv->mac_rst);
+ goto out_disable_clk;
+ }
+ hisi_femac_core_reset(priv);
+
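+ /* The PHY reset control is optional; boards without it skip the
+ * delay-driven PHY reset sequence below
+ */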
+ priv->phy_rst = devm_reset_control_get(dev, "phy");
+ if (IS_ERR(priv->phy_rst)) {
+ priv->phy_rst = NULL;
+ } else {
+ ret = of_property_read_u32_array(node,
+ PHY_RESET_DELAYS_PROPERTY,
+ priv->phy_reset_delays,
+ DELAYS_NUM);
+ if (ret)
+ goto out_disable_clk;
+ hisi_femac_phy_reset(priv);
+ }
+
+ phy = of_phy_get_and_connect(ndev, node, hisi_femac_adjust_link);
+ if (!phy) {
+ dev_err(dev, "connect to PHY failed!\n");
+ ret = -ENODEV;
+ goto out_disable_clk;
+ }
+
+ phy_attached_print(phy, "phy_id=0x%.8lx, phy_mode=%s\n",
+ (unsigned long)phy->phy_id,
+ phy_modes(phy->interface));
+
+ mac_addr = of_get_mac_address(node);
+ if (mac_addr)
+ ether_addr_copy(ndev->dev_addr, mac_addr);
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ eth_hw_addr_random(ndev);
+ dev_warn(dev, "using random MAC address %pM\n",
+ ndev->dev_addr);
+ }
+
+ ndev->watchdog_timeo = 6 * HZ;
+ ndev->priv_flags |= IFF_UNICAST_FLT;
+ ndev->netdev_ops = &hisi_femac_netdev_ops;
+ ndev->ethtool_ops = &hisi_femac_ethtools_ops;
+ netif_napi_add(ndev, &priv->napi, hisi_femac_poll, FEMAC_POLL_WEIGHT);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ hisi_femac_port_init(priv);
+
+ ret = hisi_femac_init_tx_and_rx_queues(priv);
+ if (ret)
+ goto out_disconnect_phy;
+
+ ndev->irq = platform_get_irq(pdev, 0);
+ if (ndev->irq <= 0) {
+ dev_err(dev, "No irq resource\n");
+ ret = -ENODEV;
+ goto out_disconnect_phy;
+ }
+
+ ret = devm_request_irq(dev, ndev->irq, hisi_femac_interrupt,
+ IRQF_SHARED, pdev->name, ndev);
+ if (ret) {
+ dev_err(dev, "devm_request_irq %d failed!\n", ndev->irq);
+ goto out_disconnect_phy;
+ }
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(dev, "register_netdev failed!\n");
+ goto out_disconnect_phy;
+ }
+
+ return ret;
+
+out_disconnect_phy:
+ netif_napi_del(&priv->napi);
+ phy_disconnect(phy);
+out_disable_clk:
+ clk_disable_unprepare(priv->clk);
+out_free_netdev:
+ free_netdev(ndev);
+
+ return ret;
+}
+
+static int hisi_femac_drv_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct hisi_femac_priv *priv = netdev_priv(ndev);
+
+ netif_napi_del(&priv->napi);
+ unregister_netdev(ndev);
+
+ phy_disconnect(ndev->phydev);
+ clk_disable_unprepare(priv->clk);
+ free_netdev(ndev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int hisi_femac_drv_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct hisi_femac_priv *priv = netdev_priv(ndev);
+
+ disable_irq(ndev->irq);
+ if (netif_running(ndev)) {
+ hisi_femac_net_close(ndev);
+ netif_device_detach(ndev);
+ }
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static int hisi_femac_drv_resume(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct hisi_femac_priv *priv = netdev_priv(ndev);
+
+ clk_prepare_enable(priv->clk);
+ if (priv->phy_rst)
+ hisi_femac_phy_reset(priv);
+
+ if (netif_running(ndev)) {
+ hisi_femac_port_init(priv);
+ hisi_femac_net_open(ndev);
+ netif_device_attach(ndev);
+ }
+ enable_irq(ndev->irq);
+
+ return 0;
+}
+#endif
+
+static const struct of_device_id hisi_femac_match[] = {
+ {.compatible = "hisilicon,hisi-femac-v1",},
+ {.compatible = "hisilicon,hisi-femac-v2",},
+ {.compatible = "hisilicon,hi3516cv300-femac",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, hisi_femac_match);
+
+static struct platform_driver hisi_femac_driver = {
+ .driver = {
+ .name = "hisi-femac",
+ .of_match_table = hisi_femac_match,
+ },
+ .probe = hisi_femac_drv_probe,
+ .remove = hisi_femac_drv_remove,
+#ifdef CONFIG_PM
+ .suspend = hisi_femac_drv_suspend,
+ .resume = hisi_femac_drv_resume,
+#endif
+};
+
+module_platform_driver(hisi_femac_driver);
+
+MODULE_DESCRIPTION("Hisilicon Fast Ethernet MAC driver");
+MODULE_AUTHOR("Dongpo Li <lidongpo@hisilicon.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:hisi-femac");
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index e51892d518ff..275618bb4646 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -218,7 +218,6 @@ struct hix5hd2_priv {
struct device *dev;
struct net_device *netdev;
- struct phy_device *phy;
struct device_node *phy_node;
phy_interface_t phy_mode;
@@ -402,7 +401,7 @@ static int hix5hd2_net_set_mac_address(struct net_device *dev, void *p)
static void hix5hd2_adjust_link(struct net_device *dev)
{
struct hix5hd2_priv *priv = netdev_priv(dev);
- struct phy_device *phy = priv->phy;
+ struct phy_device *phy = dev->phydev;
if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) {
hix5hd2_config_port(dev, phy->speed, phy->duplex);
@@ -636,7 +635,7 @@ static int hix5hd2_net_xmit(struct sk_buff *skb, struct net_device *dev)
pos = dma_ring_incr(pos, TX_DESC_NUM);
writel_relaxed(dma_byte(pos), priv->base + TX_BQ_WR_ADDR);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
netdev_sent_queue(dev, skb->len);
@@ -679,6 +678,7 @@ static void hix5hd2_free_dma_desc_rings(struct hix5hd2_priv *priv)
static int hix5hd2_net_open(struct net_device *dev)
{
struct hix5hd2_priv *priv = netdev_priv(dev);
+ struct phy_device *phy;
int ret;
ret = clk_prepare_enable(priv->clk);
@@ -687,12 +687,12 @@ static int hix5hd2_net_open(struct net_device *dev)
return ret;
}
- priv->phy = of_phy_connect(dev, priv->phy_node,
- &hix5hd2_adjust_link, 0, priv->phy_mode);
- if (!priv->phy)
+ phy = of_phy_connect(dev, priv->phy_node,
+ &hix5hd2_adjust_link, 0, priv->phy_mode);
+ if (!phy)
return -ENODEV;
- phy_start(priv->phy);
+ phy_start(phy);
hix5hd2_hw_init(priv);
hix5hd2_rx_refill(priv);
@@ -716,9 +716,9 @@ static int hix5hd2_net_close(struct net_device *dev)
netif_stop_queue(dev);
hix5hd2_free_dma_desc_rings(priv);
- if (priv->phy) {
- phy_stop(priv->phy);
- phy_disconnect(priv->phy);
+ if (dev->phydev) {
+ phy_stop(dev->phydev);
+ phy_disconnect(dev->phydev);
}
clk_disable_unprepare(priv->clk);
@@ -750,32 +750,10 @@ static const struct net_device_ops hix5hd2_netdev_ops = {
.ndo_set_mac_address = hix5hd2_net_set_mac_address,
};
-static int hix5hd2_get_settings(struct net_device *net_dev,
- struct ethtool_cmd *cmd)
-{
- struct hix5hd2_priv *priv = netdev_priv(net_dev);
-
- if (!priv->phy)
- return -ENODEV;
-
- return phy_ethtool_gset(priv->phy, cmd);
-}
-
-static int hix5hd2_set_settings(struct net_device *net_dev,
- struct ethtool_cmd *cmd)
-{
- struct hix5hd2_priv *priv = netdev_priv(net_dev);
-
- if (!priv->phy)
- return -ENODEV;
-
- return phy_ethtool_sset(priv->phy, cmd);
-}
-
static struct ethtool_ops hix5hd2_ethtools_ops = {
.get_link = ethtool_op_get_link,
- .get_settings = hix5hd2_get_settings,
- .set_settings = hix5hd2_set_settings,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static int hix5hd2_mdio_wait_ready(struct mii_bus *bus)
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index 3bfe36f9405b..c54c6fac0d1d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -96,16 +96,22 @@ static int __ae_match(struct device *dev, const void *data)
{
struct hnae_ae_dev *hdev = cls_to_ae_dev(dev);
- return hdev->dev->of_node == data;
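+ /* Match by fwnode handle so both DT- and ACPI-described AE devices
+ * can be found
+ */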
+ if (dev_of_node(hdev->dev))
+ return (data == &hdev->dev->of_node->fwnode);
+ else if (is_acpi_node(hdev->dev->fwnode))
+ return (data == hdev->dev->fwnode);
+
+ dev_err(dev, "__ae_match cannot read cfg data from OF or acpi\n");
+ return 0;
}
-static struct hnae_ae_dev *find_ae(const struct device_node *ae_node)
+static struct hnae_ae_dev *find_ae(const struct fwnode_handle *fwnode)
{
struct device *dev;
- WARN_ON(!ae_node);
+ WARN_ON(!fwnode);
- dev = class_find_device(hnae_class, NULL, ae_node, __ae_match);
+ dev = class_find_device(hnae_class, NULL, fwnode, __ae_match);
return dev ? cls_to_ae_dev(dev) : NULL;
}
@@ -312,7 +318,7 @@ EXPORT_SYMBOL(hnae_reinit_handle);
* return handle ptr or ERR_PTR
*/
struct hnae_handle *hnae_get_handle(struct device *owner_dev,
- const struct device_node *ae_node,
+ const struct fwnode_handle *fwnode,
u32 port_id,
struct hnae_buf_ops *bops)
{
@@ -321,7 +327,7 @@ struct hnae_handle *hnae_get_handle(struct device *owner_dev,
int i, j;
int ret;
- dev = find_ae(ae_node);
+ dev = find_ae(fwnode);
if (!dev)
return ERR_PTR(-ENODEV);
@@ -394,7 +400,6 @@ int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
if (!hdev->ops || !hdev->ops->get_handle ||
!hdev->ops->toggle_ring_irq ||
- !hdev->ops->toggle_queue_status ||
!hdev->ops->get_status || !hdev->ops->adjust_link)
return -EINVAL;
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index e8d36aaea223..e093cbf26c8c 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -27,6 +27,7 @@
* "cb" means control block
*/
+#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/module.h>
@@ -362,6 +363,14 @@ enum hnae_port_type {
HNAE_PORT_DEBUG
};
+/* mac media type */
+enum hnae_media_type {
+ HNAE_MEDIA_TYPE_UNKNOWN = 0,
+ HNAE_MEDIA_TYPE_FIBER,
+ HNAE_MEDIA_TYPE_COPPER,
+ HNAE_MEDIA_TYPE_BACKPLANE,
+};
+
/* This struct defines the operation on the handle.
*
* get_handle(): (mandatory)
@@ -453,7 +462,6 @@ struct hnae_ae_ops {
int (*get_info)(struct hnae_handle *handle,
u8 *auto_neg, u16 *speed, u8 *duplex);
void (*toggle_ring_irq)(struct hnae_ring *ring, u32 val);
- void (*toggle_queue_status)(struct hnae_queue *queue, u32 val);
void (*adjust_link)(struct hnae_handle *handle, int speed, int duplex);
int (*set_loopback)(struct hnae_handle *handle,
enum hnae_loop loop_mode, int en);
@@ -472,6 +480,11 @@ struct hnae_ae_ops {
int (*set_coalesce_usecs)(struct hnae_handle *handle, u32 timeout);
int (*set_coalesce_frames)(struct hnae_handle *handle,
u32 coalesce_frames);
+ void (*get_coalesce_range)(struct hnae_handle *handle,
+ u32 *tx_frames_low, u32 *rx_frames_low,
+ u32 *tx_frames_high, u32 *rx_frames_high,
+ u32 *tx_usecs_low, u32 *rx_usecs_low,
+ u32 *tx_usecs_high, u32 *rx_usecs_high);
void (*set_promisc_mode)(struct hnae_handle *handle, u32 en);
int (*get_mac_addr)(struct hnae_handle *handle, void **p);
int (*set_mac_addr)(struct hnae_handle *handle, void *p);
@@ -512,7 +525,7 @@ struct hnae_ae_dev {
struct hnae_handle {
struct device *owner_dev; /* the device which make use of this handle */
struct hnae_ae_dev *dev; /* the device who provides this handle */
- struct device_node *phy_node;
+ struct phy_device *phy_dev;
phy_interface_t phy_if;
u32 if_support;
int q_num;
@@ -520,6 +533,7 @@ struct hnae_handle {
u32 eport_id;
u32 dport_id; /* v2 tx bd should fill the dport_id */
enum hnae_port_type port_type;
+ enum hnae_media_type media_type;
struct list_head node; /* list to hnae_ae_dev->handle_list */
struct hnae_buf_ops *bops; /* operation for the buffer */
struct hnae_queue **qs; /* array base of all queues */
@@ -528,7 +542,7 @@ struct hnae_handle {
#define ring_to_dev(ring) ((ring)->q->dev->dev)
struct hnae_handle *hnae_get_handle(struct device *owner_dev,
- const struct device_node *ae_node,
+ const struct fwnode_handle *fwnode,
u32 port_id,
struct hnae_buf_ops *bops);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index a1cb461ac45f..e28d960997af 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -29,25 +29,6 @@ static struct hns_mac_cb *hns_get_mac_cb(struct hnae_handle *handle)
return vf_cb->mac_cb;
}
-/**
- * hns_ae_map_eport_to_dport - translate enet port id to dsaf port id
- * @port_id: enet port id
- *: debug port 0-1, service port 2 -7 (dsaf mode only 2)
- * return: dsaf port id
- *: service ports 0 - 5, debug port 6-7
- **/
-static int hns_ae_map_eport_to_dport(u32 port_id)
-{
- int port_index;
-
- if (port_id < DSAF_DEBUG_NW_NUM)
- port_index = port_id + DSAF_SERVICE_PORT_NUM_PER_DSAF;
- else
- port_index = port_id - DSAF_DEBUG_NW_NUM;
-
- return port_index;
-}
-
static struct dsaf_device *hns_ae_get_dsaf_dev(struct hnae_ae_dev *dev)
{
return container_of(dev, struct dsaf_device, ae_dev);
@@ -56,50 +37,35 @@ static struct dsaf_device *hns_ae_get_dsaf_dev(struct hnae_ae_dev *dev)
static struct hns_ppe_cb *hns_get_ppe_cb(struct hnae_handle *handle)
{
int ppe_index;
- int ppe_common_index;
struct ppe_common_cb *ppe_comm;
struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
- if (vf_cb->port_index < DSAF_SERVICE_PORT_NUM_PER_DSAF) {
- ppe_index = vf_cb->port_index;
- ppe_common_index = 0;
- } else {
- ppe_index = 0;
- ppe_common_index =
- vf_cb->port_index - DSAF_SERVICE_PORT_NUM_PER_DSAF + 1;
- }
- ppe_comm = vf_cb->dsaf_dev->ppe_common[ppe_common_index];
+ ppe_comm = vf_cb->dsaf_dev->ppe_common[0];
+ ppe_index = vf_cb->port_index;
+
return &ppe_comm->ppe_cb[ppe_index];
}
static int hns_ae_get_q_num_per_vf(
struct dsaf_device *dsaf_dev, int port)
{
- int common_idx = hns_dsaf_get_comm_idx_by_port(port);
-
- return dsaf_dev->rcb_common[common_idx]->max_q_per_vf;
+ return dsaf_dev->rcb_common[0]->max_q_per_vf;
}
static int hns_ae_get_vf_num_per_port(
struct dsaf_device *dsaf_dev, int port)
{
- int common_idx = hns_dsaf_get_comm_idx_by_port(port);
-
- return dsaf_dev->rcb_common[common_idx]->max_vfn;
+ return dsaf_dev->rcb_common[0]->max_vfn;
}
static struct ring_pair_cb *hns_ae_get_base_ring_pair(
struct dsaf_device *dsaf_dev, int port)
{
- int common_idx = hns_dsaf_get_comm_idx_by_port(port);
- struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[common_idx];
+ struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[0];
int q_num = rcb_comm->max_q_per_vf;
int vf_num = rcb_comm->max_vfn;
- if (common_idx == HNS_DSAF_COMM_SERVICE_NW_IDX)
- return &rcb_comm->ring_pair_cb[port * q_num * vf_num];
- else
- return &rcb_comm->ring_pair_cb[0];
+ return &rcb_comm->ring_pair_cb[port * q_num * vf_num];
}
static struct ring_pair_cb *hns_ae_get_ring_pair(struct hnae_queue *q)
@@ -110,7 +76,6 @@ static struct ring_pair_cb *hns_ae_get_ring_pair(struct hnae_queue *q)
struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
u32 port_id)
{
- int port_idx;
int vfnum_per_port;
int qnum_per_vf;
int i;
@@ -120,11 +85,10 @@ struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
struct hnae_vf_cb *vf_cb;
dsaf_dev = hns_ae_get_dsaf_dev(dev);
- port_idx = hns_ae_map_eport_to_dport(port_id);
- ring_pair_cb = hns_ae_get_base_ring_pair(dsaf_dev, port_idx);
- vfnum_per_port = hns_ae_get_vf_num_per_port(dsaf_dev, port_idx);
- qnum_per_vf = hns_ae_get_q_num_per_vf(dsaf_dev, port_idx);
+ ring_pair_cb = hns_ae_get_base_ring_pair(dsaf_dev, port_id);
+ vfnum_per_port = hns_ae_get_vf_num_per_port(dsaf_dev, port_id);
+ qnum_per_vf = hns_ae_get_q_num_per_vf(dsaf_dev, port_id);
vf_cb = kzalloc(sizeof(*vf_cb) +
qnum_per_vf * sizeof(struct hnae_queue *), GFP_KERNEL);
@@ -163,14 +127,15 @@ struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
}
vf_cb->dsaf_dev = dsaf_dev;
- vf_cb->port_index = port_idx;
- vf_cb->mac_cb = &dsaf_dev->mac_cb[port_idx];
+ vf_cb->port_index = port_id;
+ vf_cb->mac_cb = dsaf_dev->mac_cb[port_id];
ae_handle->phy_if = vf_cb->mac_cb->phy_if;
- ae_handle->phy_node = vf_cb->mac_cb->phy_node;
+ ae_handle->phy_dev = vf_cb->mac_cb->phy_dev;
ae_handle->if_support = vf_cb->mac_cb->if_support;
ae_handle->port_type = vf_cb->mac_cb->mac_type;
- ae_handle->dport_id = port_idx;
+ ae_handle->media_type = vf_cb->mac_cb->media_type;
+ ae_handle->dport_id = port_id;
return ae_handle;
vf_id_err:
@@ -283,12 +248,21 @@ static void hns_ae_set_tso_stats(struct hnae_handle *handle, int enable)
static int hns_ae_start(struct hnae_handle *handle)
{
int ret;
+ int k;
struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
ret = hns_mac_vm_config_bc_en(mac_cb, 0, true);
if (ret)
return ret;
+ for (k = 0; k < handle->q_num; k++) {
+ if (AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver))
+ hns_rcb_int_clr_hw(handle->qs[k],
+ RCB_INT_FLAG_TX | RCB_INT_FLAG_RX);
+ else
+ hns_rcbv2_int_clr_hw(handle->qs[k],
+ RCB_INT_FLAG_TX | RCB_INT_FLAG_RX);
+ }
hns_ae_ring_enable_all(handle, 1);
msleep(100);
@@ -320,11 +294,8 @@ static void hns_ae_reset(struct hnae_handle *handle)
struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
if (vf_cb->mac_cb->mac_type == HNAE_PORT_DEBUG) {
- u8 ppe_common_index =
- vf_cb->port_index - DSAF_SERVICE_PORT_NUM_PER_DSAF + 1;
-
hns_mac_reset(vf_cb->mac_cb);
- hns_ppe_reset_common(vf_cb->dsaf_dev, ppe_common_index);
+ hns_ppe_reset_common(vf_cb->dsaf_dev, 0);
}
}
@@ -352,18 +323,6 @@ static void hns_aev2_toggle_ring_irq(struct hnae_ring *ring, u32 mask)
hns_rcbv2_int_ctrl_hw(ring->q, flag, mask);
}
-static void hns_ae_toggle_queue_status(struct hnae_queue *queue, u32 val)
-{
- struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(queue->dev);
-
- if (AE_IS_VER1(dsaf_dev->dsaf_ver))
- hns_rcb_int_clr_hw(queue, RCB_INT_FLAG_TX | RCB_INT_FLAG_RX);
- else
- hns_rcbv2_int_clr_hw(queue, RCB_INT_FLAG_TX | RCB_INT_FLAG_RX);
-
- hns_rcb_start(queue, val);
-}
-
static int hns_ae_get_link_status(struct hnae_handle *handle)
{
u32 link_status;
@@ -399,11 +358,16 @@ static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue,
static void hns_ae_get_pauseparam(struct hnae_handle *handle,
u32 *auto_neg, u32 *rx_en, u32 *tx_en)
{
- assert(handle);
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+
+ hns_mac_get_autoneg(mac_cb, auto_neg);
- hns_mac_get_autoneg(hns_get_mac_cb(handle), auto_neg);
+ hns_mac_get_pauseparam(mac_cb, rx_en, tx_en);
- hns_mac_get_pauseparam(hns_get_mac_cb(handle), rx_en, tx_en);
+ /* Service port's pause feature is provided by DSAF, not mac */
+ if (handle->port_type == HNAE_PORT_SERVICE)
+ hns_dsaf_get_rx_mac_pause_en(dsaf_dev, mac_cb->mac_id, rx_en);
}
static int hns_ae_set_autoneg(struct hnae_handle *handle, u8 enable)
@@ -436,12 +400,21 @@ static int hns_ae_set_pauseparam(struct hnae_handle *handle,
u32 autoneg, u32 rx_en, u32 tx_en)
{
struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
int ret;
ret = hns_mac_set_autoneg(mac_cb, autoneg);
if (ret)
return ret;
+ /* Service port's pause feature is provided by DSAF, not mac */
+ if (handle->port_type == HNAE_PORT_SERVICE) {
+ ret = hns_dsaf_set_rx_mac_pause_en(dsaf_dev,
+ mac_cb->mac_id, rx_en);
+ if (ret)
+ return ret;
+ rx_en = 0;
+ }
return hns_mac_set_pauseparam(mac_cb, rx_en, tx_en);
}
@@ -490,6 +463,30 @@ static int hns_ae_set_coalesce_frames(struct hnae_handle *handle,
ring_pair->port_id_in_comm, coalesce_frames);
}
+static void hns_ae_get_coalesce_range(struct hnae_handle *handle,
+ u32 *tx_frames_low, u32 *rx_frames_low,
+ u32 *tx_frames_high, u32 *rx_frames_high,
+ u32 *tx_usecs_low, u32 *rx_usecs_low,
+ u32 *tx_usecs_high, u32 *rx_usecs_high)
+{
+ struct dsaf_device *dsaf_dev;
+
+ dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
+
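+ /* A ring cannot coalesce more frames than it holds descriptors,
+ * so clamp the upper bound to desc_num - 1
+ */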
+ *tx_frames_low = HNS_RCB_MIN_COALESCED_FRAMES;
+ *rx_frames_low = HNS_RCB_MIN_COALESCED_FRAMES;
+ *tx_frames_high =
+ (dsaf_dev->desc_num - 1 > HNS_RCB_MAX_COALESCED_FRAMES) ?
+ HNS_RCB_MAX_COALESCED_FRAMES : dsaf_dev->desc_num - 1;
+ *rx_frames_high =
+ (dsaf_dev->desc_num - 1 > HNS_RCB_MAX_COALESCED_FRAMES) ?
+ HNS_RCB_MAX_COALESCED_FRAMES : dsaf_dev->desc_num - 1;
+ *tx_usecs_low = 0;
+ *rx_usecs_low = 0;
+ *tx_usecs_high = HNS_RCB_MAX_COALESCED_USECS;
+ *rx_usecs_high = HNS_RCB_MAX_COALESCED_USECS;
+}
+
void hns_ae_update_stats(struct hnae_handle *handle,
struct net_device_stats *net_stats)
{
@@ -612,6 +609,7 @@ void hns_ae_get_strings(struct hnae_handle *handle,
int idx;
struct hns_mac_cb *mac_cb;
struct hns_ppe_cb *ppe_cb;
+ struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
u8 *p = data;
struct hnae_vf_cb *vf_cb;
@@ -634,13 +632,14 @@ void hns_ae_get_strings(struct hnae_handle *handle,
p += ETH_GSTRING_LEN * hns_mac_get_sset_count(mac_cb, stringset);
if (mac_cb->mac_type == HNAE_PORT_SERVICE)
- hns_dsaf_get_strings(stringset, p, port);
+ hns_dsaf_get_strings(stringset, p, port, dsaf_dev);
}
int hns_ae_get_sset_count(struct hnae_handle *handle, int stringset)
{
u32 sset_count = 0;
struct hns_mac_cb *mac_cb;
+ struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
assert(handle);
@@ -651,7 +650,7 @@ int hns_ae_get_sset_count(struct hnae_handle *handle, int stringset)
sset_count += hns_mac_get_sset_count(mac_cb, stringset);
if (mac_cb->mac_type == HNAE_PORT_SERVICE)
- sset_count += hns_dsaf_get_sset_count(stringset);
+ sset_count += hns_dsaf_get_sset_count(dsaf_dev, stringset);
return sset_count;
}
@@ -662,13 +661,15 @@ static int hns_ae_config_loopback(struct hnae_handle *handle,
int ret;
struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
switch (loop) {
case MAC_INTERNALLOOP_PHY:
ret = 0;
break;
case MAC_INTERNALLOOP_SERDES:
- ret = hns_mac_config_sds_loopback(vf_cb->mac_cb, en);
+ ret = dsaf_dev->misc_op->cfg_serdes_loopback(vf_cb->mac_cb,
+ !!en);
break;
case MAC_INTERNALLOOP_MAC:
ret = hns_mac_config_mac_loopback(vf_cb->mac_cb, loop, en);
@@ -689,7 +690,7 @@ void hns_ae_update_led_status(struct hnae_handle *handle)
assert(handle);
mac_cb = hns_get_mac_cb(handle);
- if (!mac_cb->cpld_vaddr)
+ if (!mac_cb->cpld_ctrl)
return;
hns_set_led_opt(mac_cb);
}
@@ -709,7 +710,6 @@ int hns_ae_cpld_set_led_id(struct hnae_handle *handle,
void hns_ae_get_regs(struct hnae_handle *handle, void *data)
{
u32 *p = data;
- u32 rcb_com_idx;
int i;
struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle);
@@ -717,8 +717,7 @@ void hns_ae_get_regs(struct hnae_handle *handle, void *data)
hns_ppe_get_regs(ppe_cb, p);
p += hns_ppe_get_regs_count();
- rcb_com_idx = hns_dsaf_get_comm_idx_by_port(vf_cb->port_index);
- hns_rcb_get_common_regs(vf_cb->dsaf_dev->rcb_common[rcb_com_idx], p);
+ hns_rcb_get_common_regs(vf_cb->dsaf_dev->rcb_common[0], p);
p += hns_rcb_get_common_regs_count();
for (i = 0; i < handle->q_num; i++) {
@@ -807,7 +806,6 @@ static struct hnae_ae_ops hns_dsaf_ops = {
.stop = hns_ae_stop,
.reset = hns_ae_reset,
.toggle_ring_irq = hns_ae_toggle_ring_irq,
- .toggle_queue_status = hns_ae_toggle_queue_status,
.get_status = hns_ae_get_link_status,
.get_info = hns_ae_get_mac_info,
.adjust_link = hns_ae_adjust_link,
@@ -821,6 +819,7 @@ static struct hnae_ae_ops hns_dsaf_ops = {
.get_rx_max_coalesced_frames = hns_ae_get_rx_max_coalesced_frames,
.set_coalesce_usecs = hns_ae_set_coalesce_usecs,
.set_coalesce_frames = hns_ae_set_coalesce_frames,
+ .get_coalesce_range = hns_ae_get_coalesce_range,
.set_promisc_mode = hns_ae_set_promisc_mode,
.set_mac_addr = hns_ae_set_mac_address,
.set_mc_addr = hns_ae_set_multicast_one,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index 44abb08de155..1e1eb92998fb 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -17,7 +17,7 @@ static const struct mac_stats_string g_gmac_stats_string[] = {
{"gmac_rx_octets_total_ok", MAC_STATS_FIELD_OFF(rx_good_bytes)},
{"gmac_rx_octets_bad", MAC_STATS_FIELD_OFF(rx_bad_bytes)},
{"gmac_rx_uc_pkts", MAC_STATS_FIELD_OFF(rx_uc_pkts)},
- {"gamc_rx_mc_pkts", MAC_STATS_FIELD_OFF(rx_mc_pkts)},
+ {"gmac_rx_mc_pkts", MAC_STATS_FIELD_OFF(rx_mc_pkts)},
{"gmac_rx_bc_pkts", MAC_STATS_FIELD_OFF(rx_bc_pkts)},
{"gmac_rx_pkts_64octets", MAC_STATS_FIELD_OFF(rx_64bytes)},
{"gmac_rx_pkts_65to127", MAC_STATS_FIELD_OFF(rx_65to127)},
@@ -110,7 +110,7 @@ static void hns_gmac_free(void *mac_drv)
u32 mac_id = drv->mac_id;
- hns_dsaf_ge_srst_by_port(dsaf_dev, mac_id, 0);
+ dsaf_dev->misc_op->ge_srst(dsaf_dev, mac_id, 0);
}
static void hns_gmac_set_tx_auto_pause_frames(void *mac_drv, u16 newval)
@@ -317,9 +317,9 @@ static void hns_gmac_init(void *mac_drv)
port = drv->mac_id;
- hns_dsaf_ge_srst_by_port(dsaf_dev, port, 0);
+ dsaf_dev->misc_op->ge_srst(dsaf_dev, port, 0);
mdelay(10);
- hns_dsaf_ge_srst_by_port(dsaf_dev, port, 1);
+ dsaf_dev->misc_op->ge_srst(dsaf_dev, port, 1);
mdelay(10);
hns_gmac_disable(mac_drv, MAC_COMM_MODE_RX_AND_TX);
hns_gmac_tx_loop_pkt_dis(mac_drv);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index a38084a22bf2..5c8afe1a5ccb 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -7,18 +7,21 @@
* (at your option) any later version.
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
+#include <linux/acpi.h>
#include <linux/init.h>
-#include <linux/netdevice.h>
-#include <linux/phy_fixed.h>
#include <linux/interrupt.h>
-#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
-#include "hns_dsaf_misc.h"
#include "hns_dsaf_main.h"
+#include "hns_dsaf_misc.h"
#include "hns_dsaf_rcb.h"
#define MAC_EN_FLAG_V 0xada0328
@@ -53,20 +56,6 @@ static const enum mac_mode g_mac_mode_1000[] = {
[PHY_INTERFACE_MODE_RTBI] = MAC_MODE_RTBI_1000
};
-static enum mac_mode hns_mac_dev_to_enet_if(const struct hns_mac_cb *mac_cb)
-{
- switch (mac_cb->max_speed) {
- case MAC_SPEED_100:
- return g_mac_mode_100[mac_cb->phy_if];
- case MAC_SPEED_1000:
- return g_mac_mode_1000[mac_cb->phy_if];
- case MAC_SPEED_10000:
- return MAC_MODE_XGMII_10000;
- default:
- return MAC_MODE_MII_100;
- }
-}
-
static enum mac_mode hns_get_enet_interface(const struct hns_mac_cb *mac_cb)
{
switch (mac_cb->max_speed) {
@@ -81,17 +70,6 @@ static enum mac_mode hns_get_enet_interface(const struct hns_mac_cb *mac_cb)
}
}
-int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
-{
- if (!mac_cb->cpld_vaddr)
- return -ENODEV;
-
- *sfp_prsnt = !dsaf_read_b((u8 *)mac_cb->cpld_vaddr
- + MAC_SFP_PORT_OFFSET);
-
- return 0;
-}
-
void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status)
{
struct mac_driver *mac_ctrl_drv;
@@ -104,7 +82,7 @@ void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status)
else
*link_status = 0;
- ret = hns_mac_get_sfp_prsnt(mac_cb, &sfp_prsnt);
+ ret = mac_cb->dsaf_dev->misc_op->get_sfp_prsnt(mac_cb, &sfp_prsnt);
if (!ret)
*link_status = *link_status && sfp_prsnt;
@@ -142,7 +120,6 @@ void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex)
mac_cb->speed = speed;
mac_cb->half_duplex = !duplex;
- mac_ctrl_drv->mac_mode = hns_mac_dev_to_enet_if(mac_cb);
if (mac_ctrl_drv->adjust_link) {
ret = mac_ctrl_drv->adjust_link(mac_ctrl_drv,
@@ -168,10 +145,9 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
u8 vmid, u8 *port_num)
{
u8 tmp_port;
- u32 comm_idx;
if (mac_cb->dsaf_dev->dsaf_mode <= DSAF_MODE_ENABLE) {
- if (mac_cb->mac_id != DSAF_MAX_PORT_NUM_PER_CHIP) {
+ if (mac_cb->mac_id != DSAF_MAX_PORT_NUM) {
dev_err(mac_cb->dev,
"input invalid,%s mac%d vmid%d !\n",
mac_cb->dsaf_dev->ae_dev.name,
@@ -179,7 +155,7 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
return -EINVAL;
}
} else if (mac_cb->dsaf_dev->dsaf_mode < DSAF_MODE_MAX) {
- if (mac_cb->mac_id >= DSAF_MAX_PORT_NUM_PER_CHIP) {
+ if (mac_cb->mac_id >= DSAF_MAX_PORT_NUM) {
dev_err(mac_cb->dev,
"input invalid,%s mac%d vmid%d!\n",
mac_cb->dsaf_dev->ae_dev.name,
@@ -192,9 +168,7 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
return -EINVAL;
}
- comm_idx = hns_dsaf_get_comm_idx_by_port(mac_cb->mac_id);
-
- if (vmid >= mac_cb->dsaf_dev->rcb_common[comm_idx]->max_vfn) {
+ if (vmid >= mac_cb->dsaf_dev->rcb_common[0]->max_vfn) {
dev_err(mac_cb->dev, "input invalid,%s mac%d vmid%d !\n",
mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id, vmid);
return -EINVAL;
@@ -234,7 +208,7 @@ static int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
}
/**
- *hns_mac_get_inner_port_num - change vf mac address
+ *hns_mac_change_vf_addr - change vf mac address
*@mac_cb: mac device
*@vmid: vmid
*@addr:mac address
@@ -249,7 +223,7 @@ int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb,
struct mac_entry_idx *old_entry;
old_entry = &mac_cb->addr_entry_idx[vmid];
- if (dsaf_dev) {
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
mac_entry.in_vlan_id = old_entry->vlan_id;
mac_entry.in_port_num = mac_cb->mac_id;
@@ -289,7 +263,7 @@ int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
struct dsaf_drv_mac_single_dest_entry mac_entry;
- if (dsaf_dev && addr) {
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev) && addr) {
memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
mac_entry.in_vlan_id = 0;/*vlan_id;*/
mac_entry.in_port_num = mac_cb->mac_id;
@@ -380,7 +354,7 @@ static int hns_mac_port_config_bc_en(struct hns_mac_cb *mac_cb,
if (mac_cb->mac_type == HNAE_PORT_DEBUG)
return 0;
- if (dsaf_dev) {
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
mac_entry.in_vlan_id = vlan_id;
mac_entry.in_port_num = mac_cb->mac_id;
@@ -418,7 +392,7 @@ int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, bool enable)
uc_mac_entry = &mac_cb->addr_entry_idx[vmid];
- if (dsaf_dev) {
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
mac_entry.in_vlan_id = uc_mac_entry->vlan_id;
mac_entry.in_port_num = mac_cb->mac_id;
@@ -439,9 +413,8 @@ int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, bool enable)
void hns_mac_reset(struct hns_mac_cb *mac_cb)
{
- struct mac_driver *drv;
-
- drv = hns_mac_get_drv(mac_cb);
+ struct mac_driver *drv = hns_mac_get_drv(mac_cb);
+ bool is_ver1 = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver);
drv->mac_init(drv);
@@ -456,7 +429,7 @@ void hns_mac_reset(struct hns_mac_cb *mac_cb)
if (drv->mac_pausefrm_cfg) {
if (mac_cb->mac_type == HNAE_PORT_DEBUG)
- drv->mac_pausefrm_cfg(drv, 0, 0);
+ drv->mac_pausefrm_cfg(drv, !is_ver1, !is_ver1);
else /* mac rx must disable, dsaf pfc close instead of it*/
drv->mac_pausefrm_cfg(drv, 0, 1);
}
@@ -525,7 +498,7 @@ void hns_mac_stop(struct hns_mac_cb *mac_cb)
mac_ctrl_drv->mac_en_flg = 0;
mac_cb->link = 0;
- cpld_led_reset(mac_cb);
+ mac_cb->dsaf_dev->misc_op->cpld_reset_led(mac_cb);
}
/**
@@ -561,14 +534,6 @@ void hns_mac_get_pauseparam(struct hns_mac_cb *mac_cb, u32 *rx_en, u32 *tx_en)
*rx_en = 0;
*tx_en = 0;
}
-
- /* Due to the chip defect, the service mac's rx pause CAN'T be enabled.
- * We set the rx pause frm always be true (1), because DSAF deals with
- * the rx pause frm instead of service mac. After all, we still support
- * rx pause frm.
- */
- if (mac_cb->mac_type == HNAE_PORT_SERVICE)
- *rx_en = 1;
}
/**
@@ -602,20 +567,13 @@ int hns_mac_set_autoneg(struct hns_mac_cb *mac_cb, u8 enable)
int hns_mac_set_pauseparam(struct hns_mac_cb *mac_cb, u32 rx_en, u32 tx_en)
{
struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+ bool is_ver1 = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver);
- if (mac_cb->mac_type == HNAE_PORT_SERVICE) {
- if (!rx_en) {
- dev_err(mac_cb->dev, "disable rx_pause is not allowed!");
- return -EINVAL;
- }
- } else if (mac_cb->mac_type == HNAE_PORT_DEBUG) {
- if (tx_en || rx_en) {
- dev_err(mac_cb->dev, "enable tx_pause or enable rx_pause are not allowed!");
+ if (mac_cb->mac_type == HNAE_PORT_DEBUG) {
+ if (is_ver1 && (tx_en || rx_en)) {
+ dev_err(mac_cb->dev, "macv1 cann't enable tx/rx_pause!");
return -EINVAL;
}
- } else {
- dev_err(mac_cb->dev, "Unsupport this operation!");
- return -EINVAL;
}
if (mac_ctrl_drv->mac_pausefrm_cfg)
@@ -666,17 +624,145 @@ free_mac_drv:
return ret;
}
+static int
+hns_mac_phy_parse_addr(struct device *dev, struct fwnode_handle *fwnode)
+{
+ u32 addr;
+ int ret;
+
+ ret = fwnode_property_read_u32(fwnode, "phy-addr", &addr);
+ if (ret) {
+ dev_err(dev, "has invalid PHY address ret:%d\n", ret);
+ return ret;
+ }
+
+ if (addr >= PHY_MAX_ADDR) {
+ dev_err(dev, "PHY address %i is too large\n", addr);
+ return -EINVAL;
+ }
+
+ return addr;
+}
+
+static int hns_mac_phydev_match(struct device *dev, void *fwnode)
+{
+ return dev->fwnode == fwnode;
+}
+
+static struct platform_device *
+hns_mac_find_platform_device(struct fwnode_handle *fwnode)
+{
+ struct device *dev;
+
+ dev = bus_find_device(&platform_bus_type, NULL,
+ fwnode, hns_mac_phydev_match);
+ return dev ? to_platform_device(dev) : NULL;
+}
+
+static int
+hns_mac_register_phydev(struct mii_bus *mdio, struct hns_mac_cb *mac_cb,
+ u32 addr)
+{
+ struct phy_device *phy;
+ const char *phy_type;
+ bool is_c45;
+ int rc;
+
+ rc = fwnode_property_read_string(mac_cb->fw_port,
+ "phy-mode", &phy_type);
+ if (rc < 0)
+ return rc;
+
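+ /* XGMII links use clause-45 MDIO PHYs; SGMII links use clause 22 */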
+ if (!strcmp(phy_type, phy_modes(PHY_INTERFACE_MODE_XGMII)))
+ is_c45 = 1;
+ else if (!strcmp(phy_type, phy_modes(PHY_INTERFACE_MODE_SGMII)))
+ is_c45 = 0;
+ else
+ return -ENODATA;
+
+ phy = get_phy_device(mdio, addr, is_c45);
+ if (!phy || IS_ERR(phy))
+ return -EIO;
+
+ if (mdio->irq)
+ phy->irq = mdio->irq[addr];
+
+ /* All data is now stored in the phy struct;
+ * register it
+ */
+ rc = phy_device_register(phy);
+ if (rc) {
+ phy_device_free(phy);
+ return -ENODEV;
+ }
+
+ mac_cb->phy_dev = phy;
+
+ dev_dbg(&mdio->dev, "registered phy at address %i\n", addr);
+
+ return 0;
+}
+
+static void hns_mac_register_phy(struct hns_mac_cb *mac_cb)
+{
+ struct acpi_reference_args args;
+ struct platform_device *pdev;
+ struct mii_bus *mii_bus;
+ int rc;
+ int addr;
+
+ /* Only ACPI-described ports carry the mdio-node reference used
+ * here; resolve it and register the single phy_device it describes
+ */
+ if (!to_acpi_device_node(mac_cb->fw_port))
+ return;
+
+ rc = acpi_node_get_property_reference(
+ mac_cb->fw_port, "mdio-node", 0, &args);
+ if (rc)
+ return;
+
+ addr = hns_mac_phy_parse_addr(mac_cb->dev, mac_cb->fw_port);
+ if (addr < 0)
+ return;
+
+ /* dev address in adev */
+ pdev = hns_mac_find_platform_device(acpi_fwnode_handle(args.adev));
+ mii_bus = platform_get_drvdata(pdev);
+ rc = hns_mac_register_phydev(mii_bus, mac_cb, addr);
+ if (!rc)
+ dev_dbg(mac_cb->dev, "mac%d register phy addr:%d\n",
+ mac_cb->mac_id, addr);
+}
+
+#define MAC_MEDIA_TYPE_MAX_LEN 16
+
+static const struct {
+ enum hnae_media_type value;
+ const char *name;
+} media_type_defs[] = {
+ {HNAE_MEDIA_TYPE_UNKNOWN, "unknown" },
+ {HNAE_MEDIA_TYPE_FIBER, "fiber" },
+ {HNAE_MEDIA_TYPE_COPPER, "copper" },
+ {HNAE_MEDIA_TYPE_BACKPLANE, "backplane" },
+};
+
/**
- *mac_free_dev - get mac information from device node
+ *hns_mac_get_info - get mac information from device node
*@mac_cb: mac device
*@np:device node
- *@mac_mode_idx:mac mode index
+ * return: 0 - success, negative - fail
*/
-static void hns_mac_get_info(struct hns_mac_cb *mac_cb,
- struct device_node *np, u32 mac_mode_idx)
+static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
{
+ struct device_node *np;
+ struct regmap *syscon;
+ struct of_phandle_args cpld_args;
+ const char *media_type;
+ u32 i;
+ u32 ret;
+
mac_cb->link = false;
mac_cb->half_duplex = false;
+ mac_cb->media_type = HNAE_MEDIA_TYPE_UNKNOWN;
mac_cb->speed = mac_phy_to_speed[mac_cb->phy_if];
mac_cb->max_speed = mac_cb->speed;
@@ -690,12 +776,109 @@ static void hns_mac_get_info(struct hns_mac_cb *mac_cb,
mac_cb->max_frm = MAC_DEFAULT_MTU;
mac_cb->tx_pause_frm_time = MAC_DEFAULT_PAUSE_TIME;
+ mac_cb->port_rst_off = mac_cb->mac_id;
+ mac_cb->port_mode_off = 0;
+
+ /* if the dsaf node doesn't contain a port subnode, get phy-handle
+ * from dsaf node
+ */
+ if (!mac_cb->fw_port) {
+ np = of_parse_phandle(mac_cb->dev->of_node, "phy-handle",
+ mac_cb->mac_id);
+ mac_cb->phy_dev = of_phy_find_device(np);
+ if (mac_cb->phy_dev) {
+ /* refcount is held by of_phy_find_device()
+ * if the phy_dev is found
+ */
+ put_device(&mac_cb->phy_dev->mdio.dev);
+
+ dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n",
+ mac_cb->mac_id, np->name);
+ }
+ of_node_put(np);
+
+ return 0;
+ }
+
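+ /* A port subnode exists: it is either a DT node or an ACPI node,
+ * so parse whichever firmware description is present
+ */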
+ if (is_of_node(mac_cb->fw_port)) {
+ /* parse property from port subnode in dsaf */
+ np = of_parse_phandle(to_of_node(mac_cb->fw_port),
+ "phy-handle", 0);
+ mac_cb->phy_dev = of_phy_find_device(np);
+ if (mac_cb->phy_dev) {
+ /* refcount is held by of_phy_find_device()
+ * if the phy_dev is found
+ */
+ put_device(&mac_cb->phy_dev->mdio.dev);
+ dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n",
+ mac_cb->mac_id, np->name);
+ }
+ of_node_put(np);
+
+ np = of_parse_phandle(to_of_node(mac_cb->fw_port),
+ "serdes-syscon", 0);
+ syscon = syscon_node_to_regmap(np);
+ of_node_put(np);
+ if (IS_ERR_OR_NULL(syscon)) {
+ dev_err(mac_cb->dev, "serdes-syscon is needed!\n");
+ return -EINVAL;
+ }
+ mac_cb->serdes_ctrl = syscon;
+
+ ret = fwnode_property_read_u32(mac_cb->fw_port,
+ "port-rst-offset",
+ &mac_cb->port_rst_off);
+ if (ret) {
+ dev_dbg(mac_cb->dev,
+ "mac%d port-rst-offset not found, use default value.\n",
+ mac_cb->mac_id);
+ }
+
+ ret = fwnode_property_read_u32(mac_cb->fw_port,
+ "port-mode-offset",
+ &mac_cb->port_mode_off);
+ if (ret) {
+ dev_dbg(mac_cb->dev,
+ "mac%d port-mode-offset not found, use default value.\n",
+ mac_cb->mac_id);
+ }
+
+ ret = of_parse_phandle_with_fixed_args(
+ to_of_node(mac_cb->fw_port), "cpld-syscon", 1, 0,
+ &cpld_args);
+ if (ret) {
+ dev_dbg(mac_cb->dev, "mac%d no cpld-syscon found.\n",
+ mac_cb->mac_id);
+ mac_cb->cpld_ctrl = NULL;
+ } else {
+ syscon = syscon_node_to_regmap(cpld_args.np);
+ if (IS_ERR_OR_NULL(syscon)) {
+ dev_dbg(mac_cb->dev, "no cpld-syscon found!\n");
+ mac_cb->cpld_ctrl = NULL;
+ } else {
+ mac_cb->cpld_ctrl = syscon;
+ mac_cb->cpld_ctrl_reg = cpld_args.args[0];
+ }
+ }
+ } else if (is_acpi_node(mac_cb->fw_port)) {
+ hns_mac_register_phy(mac_cb);
+ } else {
+ dev_err(mac_cb->dev, "mac%d cannot find phy node\n",
+ mac_cb->mac_id);
+ }
+
+ if (!fwnode_property_read_string(mac_cb->fw_port, "media-type",
+ &media_type)) {
+ for (i = 0; i < ARRAY_SIZE(media_type_defs); i++) {
+ if (!strncmp(media_type_defs[i].name, media_type,
+ MAC_MEDIA_TYPE_MAX_LEN)) {
+ mac_cb->media_type = media_type_defs[i].value;
+ break;
+ }
+ }
+ }
- /* Get the rest of the PHY information */
- mac_cb->phy_node = of_parse_phandle(np, "phy-handle", mac_cb->mac_id);
- if (mac_cb->phy_node)
- dev_dbg(mac_cb->dev, "mac%d phy_node: %s\n",
- mac_cb->mac_id, mac_cb->phy_node->name);
+ return 0;
}
/**
@@ -725,45 +908,36 @@ u8 __iomem *hns_mac_get_vaddr(struct dsaf_device *dsaf_dev,
return base + 0x40000 + mac_id * 0x4000 -
mac_mode_idx * 0x20000;
else
- return mac_cb->serdes_vaddr + 0x1000
- + (mac_id - DSAF_SERVICE_PORT_NUM_PER_DSAF) * 0x100000;
+ return dsaf_dev->ppe_base + 0x1000;
}
/**
* hns_mac_get_cfg - get mac cfg from dtb or acpi table
* @dsaf_dev: dsa fabric device struct pointer
- * @mac_idx: mac index
- * retuen 0 - success , negative --fail
+ * @mac_cb: mac control block
+ * return 0 - success, negative - fail
*/
-int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, int mac_idx)
+int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, struct hns_mac_cb *mac_cb)
{
int ret;
u32 mac_mode_idx;
- struct hns_mac_cb *mac_cb = &dsaf_dev->mac_cb[mac_idx];
mac_cb->dsaf_dev = dsaf_dev;
mac_cb->dev = dsaf_dev->dev;
- mac_cb->mac_id = mac_idx;
mac_cb->sys_ctl_vaddr = dsaf_dev->sc_base;
mac_cb->serdes_vaddr = dsaf_dev->sds_base;
- if (dsaf_dev->cpld_base &&
- mac_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF) {
- mac_cb->cpld_vaddr = dsaf_dev->cpld_base +
- mac_cb->mac_id * CPLD_ADDR_PORT_OFFSET;
- cpld_led_reset(mac_cb);
- }
mac_cb->sfp_prsnt = 0;
mac_cb->txpkt_for_led = 0;
mac_cb->rxpkt_for_led = 0;
- if (mac_idx < DSAF_SERVICE_PORT_NUM_PER_DSAF)
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev))
mac_cb->mac_type = HNAE_PORT_SERVICE;
else
mac_cb->mac_type = HNAE_PORT_DEBUG;
- mac_cb->phy_if = hns_mac_get_phy_if(mac_cb);
+ mac_cb->phy_if = dsaf_dev->misc_op->get_phy_if(mac_cb);
ret = hns_mac_get_mode(mac_cb->phy_if);
if (ret < 0) {
@@ -774,53 +948,100 @@ int hns_mac_get_cfg(struct dsaf_device *dsaf_dev, int mac_idx)
}
mac_mode_idx = (u32)ret;
- hns_mac_get_info(mac_cb, mac_cb->dev->of_node, mac_mode_idx);
+ ret = hns_mac_get_info(mac_cb);
+ if (ret)
+ return ret;
+ mac_cb->dsaf_dev->misc_op->cpld_reset_led(mac_cb);
mac_cb->vaddr = hns_mac_get_vaddr(dsaf_dev, mac_cb, mac_mode_idx);
return 0;
}
+static int hns_mac_get_max_port_num(struct dsaf_device *dsaf_dev)
+{
+ if (HNS_DSAF_IS_DEBUG(dsaf_dev))
+ return 1;
+ else
+ return DSAF_MAX_PORT_NUM;
+}
+
/**
* hns_mac_init - init mac
* @dsaf_dev: dsa fabric device struct pointer
- * retuen 0 - success , negative --fail
+ * return 0 - success, negative - fail
*/
int hns_mac_init(struct dsaf_device *dsaf_dev)
{
- int i;
+ bool found = false;
int ret;
- size_t size;
+ u32 port_id;
+ int max_port_num = hns_mac_get_max_port_num(dsaf_dev);
struct hns_mac_cb *mac_cb;
+ struct fwnode_handle *child;
- size = sizeof(struct hns_mac_cb) * DSAF_MAX_PORT_NUM_PER_CHIP;
- dsaf_dev->mac_cb = devm_kzalloc(dsaf_dev->dev, size, GFP_KERNEL);
- if (!dsaf_dev->mac_cb)
- return -ENOMEM;
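+ /* Each child fwnode describes one MAC port, identified by "reg" */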
+ device_for_each_child_node(dsaf_dev->dev, child) {
+ ret = fwnode_property_read_u32(child, "reg", &port_id);
+ if (ret) {
+ dev_err(dsaf_dev->dev,
+ "get reg fail, ret=%d!\n", ret);
+ return ret;
+ }
+ if (port_id >= max_port_num) {
+ dev_err(dsaf_dev->dev,
+ "reg(%u) out of range!\n", port_id);
+ return -EINVAL;
+ }
+ mac_cb = devm_kzalloc(dsaf_dev->dev, sizeof(*mac_cb),
+ GFP_KERNEL);
+ if (!mac_cb)
+ return -ENOMEM;
+ mac_cb->fw_port = child;
+ mac_cb->mac_id = (u8)port_id;
+ dsaf_dev->mac_cb[port_id] = mac_cb;
+ found = true;
+ }
- for (i = 0; i < DSAF_MAX_PORT_NUM_PER_CHIP; i++) {
- ret = hns_mac_get_cfg(dsaf_dev, i);
- if (ret)
- goto free_mac_cb;
+ /* if no port subnode was found under the dsaf node, init all
+ * ports instead; this keeps compatibility with the old dts
+ */
+ if (!found) {
+ for (port_id = 0; port_id < max_port_num; port_id++) {
+ mac_cb = devm_kzalloc(dsaf_dev->dev, sizeof(*mac_cb),
+ GFP_KERNEL);
+ if (!mac_cb)
+ return -ENOMEM;
+
+ mac_cb->mac_id = port_id;
+ dsaf_dev->mac_cb[port_id] = mac_cb;
+ }
+ }
+ /* init mac_cb for all port */
+ for (port_id = 0; port_id < max_port_num; port_id++) {
+ mac_cb = dsaf_dev->mac_cb[port_id];
+ if (!mac_cb)
+ continue;
- mac_cb = &dsaf_dev->mac_cb[i];
+ ret = hns_mac_get_cfg(dsaf_dev, mac_cb);
+ if (ret)
+ return ret;
ret = hns_mac_init_ex(mac_cb);
if (ret)
- goto free_mac_cb;
+ return ret;
}
return 0;
-
-free_mac_cb:
- dsaf_dev->mac_cb = NULL;
-
- return ret;
}
void hns_mac_uninit(struct dsaf_device *dsaf_dev)
{
- cpld_led_reset(dsaf_dev->mac_cb);
- dsaf_dev->mac_cb = NULL;
+ int i;
+ int max_port_num = hns_mac_get_max_port_num(dsaf_dev);
+
+ for (i = 0; i < max_port_num; i++) {
+ dsaf_dev->misc_op->cpld_reset_led(dsaf_dev->mac_cb[i]);
+ dsaf_dev->mac_cb[i] = NULL;
+ }
}
int hns_mac_config_mac_loopback(struct hns_mac_cb *mac_cb,
@@ -901,15 +1122,15 @@ void hns_set_led_opt(struct hns_mac_cb *mac_cb)
nic_data = 0;
mac_cb->txpkt_for_led = mac_cb->hw_stats.tx_good_pkts;
mac_cb->rxpkt_for_led = mac_cb->hw_stats.rx_good_pkts;
- hns_cpld_set_led(mac_cb, (int)mac_cb->link,
+ mac_cb->dsaf_dev->misc_op->cpld_set_led(mac_cb, (int)mac_cb->link,
mac_cb->speed, nic_data);
}
int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb,
enum hnae_led_state status)
{
- if (!mac_cb || !mac_cb->cpld_vaddr)
+ if (!mac_cb || !mac_cb->cpld_ctrl)
return 0;
- return cpld_set_led_id(mac_cb, status);
+ return mac_cb->dsaf_dev->misc_op->cpld_set_led_id(mac_cb, status);
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index 823b6e78c8aa..4cbdf14f5c16 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -10,9 +10,10 @@
#ifndef _HNS_DSAF_MAC_H
#define _HNS_DSAF_MAC_H
-#include <linux/phy.h>
-#include <linux/kernel.h>
#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/phy.h>
+#include <linux/regmap.h>
#include "hns_dsaf_main.h"
struct dsaf_device;
@@ -310,10 +311,15 @@ struct hns_mac_cb {
struct device *dev;
struct dsaf_device *dsaf_dev;
struct mac_priv priv;
+ struct fwnode_handle *fw_port;
u8 __iomem *vaddr;
- u8 __iomem *cpld_vaddr;
u8 __iomem *sys_ctl_vaddr;
u8 __iomem *serdes_vaddr;
+ struct regmap *serdes_ctrl;
+ struct regmap *cpld_ctrl;
+ u32 cpld_ctrl_reg;
+ u32 port_rst_off;
+ u32 port_mode_off;
struct mac_entry_idx addr_entry_idx[DSAF_MAX_VM_NUM];
u8 sfp_prsnt;
u8 cpld_led_value;
@@ -329,10 +335,11 @@ struct hns_mac_cb {
u64 txpkt_for_led;
u64 rxpkt_for_led;
enum hnae_port_type mac_type;
+ enum hnae_media_type media_type;
phy_interface_t phy_if;
enum hnae_loop loop_mode;
- struct device_node *phy_node;
+ struct phy_device *phy_dev;
struct mac_hw_stats hw_stats;
};
@@ -442,8 +449,6 @@ int hns_mac_set_pauseparam(struct hns_mac_cb *mac_cb, u32 rx_en, u32 tx_en);
int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu);
int hns_mac_get_port_info(struct hns_mac_cb *mac_cb,
u8 *auto_neg, u16 *speed, u8 *duplex);
-phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb);
-int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, u8 en);
int hns_mac_config_mac_loopback(struct hns_mac_cb *mac_cb,
enum hnae_loop loop, int en);
void hns_mac_update_stats(struct hns_mac_cb *mac_cb);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index 5978a5c8ef35..afb5daa3721d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -7,43 +7,71 @@
* (at your option) any later version.
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
+#include <linux/acpi.h>
+#include <linux/device.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/netdevice.h>
-#include <linux/platform_device.h>
+#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#include <linux/device.h>
+#include <linux/platform_device.h>
#include <linux/vmalloc.h>
+#include "hns_dsaf_mac.h"
#include "hns_dsaf_main.h"
-#include "hns_dsaf_rcb.h"
#include "hns_dsaf_ppe.h"
-#include "hns_dsaf_mac.h"
+#include "hns_dsaf_rcb.h"
+#include "hns_dsaf_misc.h"
const char *g_dsaf_mode_match[DSAF_MODE_MAX] = {
[DSAF_MODE_DISABLE_2PORT_64VM] = "2port-64vf",
[DSAF_MODE_DISABLE_6PORT_0VM] = "6port-16rss",
[DSAF_MODE_DISABLE_6PORT_16VM] = "6port-16vf",
+ [DSAF_MODE_DISABLE_SP] = "single-port",
};
+static const struct acpi_device_id hns_dsaf_acpi_match[] = {
+ { "HISI00B1", 0 },
+ { "HISI00B2", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, hns_dsaf_acpi_match);
+
int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
{
int ret, i;
u32 desc_num;
u32 buf_size;
+ u32 reset_offset = 0;
+ u32 res_idx = 0;
const char *mode_str;
- struct device_node *np = dsaf_dev->dev->of_node;
-
- if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v1"))
- dsaf_dev->dsaf_ver = AE_VERSION_1;
- else
- dsaf_dev->dsaf_ver = AE_VERSION_2;
+ struct regmap *syscon;
+ struct resource *res;
+ struct device_node *np = dsaf_dev->dev->of_node, *np_temp;
+ struct platform_device *pdev = to_platform_device(dsaf_dev->dev);
+
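+ /* Determine the DSAF version from the DT compatible string, or
+ * from which ACPI HID was matched when booted via ACPI
+ */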
+ if (dev_of_node(dsaf_dev->dev)) {
+ if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v1"))
+ dsaf_dev->dsaf_ver = AE_VERSION_1;
+ else
+ dsaf_dev->dsaf_ver = AE_VERSION_2;
+ } else if (is_acpi_node(dsaf_dev->dev->fwnode)) {
+ if (acpi_dev_found(hns_dsaf_acpi_match[0].id))
+ dsaf_dev->dsaf_ver = AE_VERSION_1;
+ else if (acpi_dev_found(hns_dsaf_acpi_match[1].id))
+ dsaf_dev->dsaf_ver = AE_VERSION_2;
+ else
+ return -ENXIO;
+ } else {
+ dev_err(dsaf_dev->dev, "cannot get cfg data from of or acpi\n");
+ return -ENXIO;
+ }
- ret = of_property_read_string(np, "mode", &mode_str);
+ ret = device_property_read_string(dsaf_dev->dev, "mode", &mode_str);
if (ret) {
dev_err(dsaf_dev->dev, "get dsaf mode fail, ret=%d!\n", ret);
return ret;
@@ -73,56 +101,99 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
else
dsaf_dev->dsaf_tc_mode = HRD_DSAF_4TC_MODE;
- dsaf_dev->sc_base = of_iomap(np, 0);
- if (!dsaf_dev->sc_base) {
- dev_err(dsaf_dev->dev,
- "%s of_iomap 0 fail!\n", dsaf_dev->ae_dev.name);
- ret = -ENOMEM;
- goto unmap_base_addr;
- }
-
- dsaf_dev->sds_base = of_iomap(np, 1);
- if (!dsaf_dev->sds_base) {
- dev_err(dsaf_dev->dev,
- "%s of_iomap 1 fail!\n", dsaf_dev->ae_dev.name);
- ret = -ENOMEM;
- goto unmap_base_addr;
+ if (dev_of_node(dsaf_dev->dev)) {
+ np_temp = of_parse_phandle(np, "subctrl-syscon", 0);
+ syscon = syscon_node_to_regmap(np_temp);
+ of_node_put(np_temp);
+ if (IS_ERR_OR_NULL(syscon)) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM,
+ res_idx++);
+ if (!res) {
+ dev_err(dsaf_dev->dev, "subctrl info is needed!\n");
+ return -ENOMEM;
+ }
+
+ dsaf_dev->sc_base = devm_ioremap_resource(&pdev->dev,
+ res);
+ if (IS_ERR(dsaf_dev->sc_base)) {
+ dev_err(dsaf_dev->dev, "subctrl can not map!\n");
+ return PTR_ERR(dsaf_dev->sc_base);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM,
+ res_idx++);
+ if (!res) {
+ dev_err(dsaf_dev->dev, "serdes-ctrl info is needed!\n");
+ return -ENOMEM;
+ }
+
+ dsaf_dev->sds_base = devm_ioremap_resource(&pdev->dev,
+ res);
+ if (IS_ERR(dsaf_dev->sds_base)) {
+ dev_err(dsaf_dev->dev, "serdes-ctrl can not map!\n");
+ return PTR_ERR(dsaf_dev->sds_base);
+ }
+ } else {
+ dsaf_dev->sub_ctrl = syscon;
+ }
}
- dsaf_dev->ppe_base = of_iomap(np, 2);
- if (!dsaf_dev->ppe_base) {
- dev_err(dsaf_dev->dev,
- "%s of_iomap 2 fail!\n", dsaf_dev->ae_dev.name);
- ret = -ENOMEM;
- goto unmap_base_addr;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ppe-base");
+ if (!res) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx++);
+ if (!res) {
+ dev_err(dsaf_dev->dev, "ppe-base info is needed!\n");
+ return -ENOMEM;
+ }
}
-
- dsaf_dev->io_base = of_iomap(np, 3);
- if (!dsaf_dev->io_base) {
- dev_err(dsaf_dev->dev,
- "%s of_iomap 3 fail!\n", dsaf_dev->ae_dev.name);
- ret = -ENOMEM;
- goto unmap_base_addr;
+ dsaf_dev->ppe_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dsaf_dev->ppe_base)) {
+ dev_err(dsaf_dev->dev, "ppe-base resource can not map!\n");
+ return PTR_ERR(dsaf_dev->ppe_base);
+ }
+ dsaf_dev->ppe_paddr = res->start;
+
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "dsaf-base");
+ if (!res) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM,
+ res_idx);
+ if (!res) {
+ dev_err(dsaf_dev->dev,
+ "dsaf-base info is needed!\n");
+ return -ENOMEM;
+ }
+ }
+ dsaf_dev->io_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dsaf_dev->io_base)) {
+ dev_err(dsaf_dev->dev, "dsaf-base resource can not map!\n");
+ return PTR_ERR(dsaf_dev->io_base);
+ }
}
- dsaf_dev->cpld_base = of_iomap(np, 4);
- if (!dsaf_dev->cpld_base)
- dev_dbg(dsaf_dev->dev, "NO CPLD ADDR");
-
- ret = of_property_read_u32(np, "desc-num", &desc_num);
+ ret = device_property_read_u32(dsaf_dev->dev, "desc-num", &desc_num);
if (ret < 0 || desc_num < HNS_DSAF_MIN_DESC_CNT ||
desc_num > HNS_DSAF_MAX_DESC_CNT) {
dev_err(dsaf_dev->dev, "get desc-num(%d) fail, ret=%d!\n",
desc_num, ret);
- goto unmap_base_addr;
+ return -EINVAL;
}
dsaf_dev->desc_num = desc_num;
- ret = of_property_read_u32(np, "buf-size", &buf_size);
+ ret = device_property_read_u32(dsaf_dev->dev, "reset-field-offset",
+ &reset_offset);
+ if (ret < 0) {
+ dev_dbg(dsaf_dev->dev,
+ "get reset-field-offset fail, ret=%d!\r\n", ret);
+ }
+ dsaf_dev->reset_offset = reset_offset;
+
+ ret = device_property_read_u32(dsaf_dev->dev, "buf-size", &buf_size);
if (ret < 0) {
dev_err(dsaf_dev->dev,
"get buf-size fail, ret=%d!\r\n", ret);
- goto unmap_base_addr;
+ return ret;
}
dsaf_dev->buf_size = buf_size;
@@ -130,46 +201,19 @@ int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
if (dsaf_dev->buf_size_type < 0) {
dev_err(dsaf_dev->dev,
"buf_size(%d) is wrong!\n", buf_size);
- goto unmap_base_addr;
+ return -EINVAL;
}
+ dsaf_dev->misc_op = hns_misc_op_get(dsaf_dev);
+ if (!dsaf_dev->misc_op)
+ return -ENOMEM;
+
if (!dma_set_mask_and_coherent(dsaf_dev->dev, DMA_BIT_MASK(64ULL)))
dev_dbg(dsaf_dev->dev, "set mask to 64bit\n");
else
dev_err(dsaf_dev->dev, "set mask to 64bit fail!\n");
return 0;
-
-unmap_base_addr:
- if (dsaf_dev->io_base)
- iounmap(dsaf_dev->io_base);
- if (dsaf_dev->ppe_base)
- iounmap(dsaf_dev->ppe_base);
- if (dsaf_dev->sds_base)
- iounmap(dsaf_dev->sds_base);
- if (dsaf_dev->sc_base)
- iounmap(dsaf_dev->sc_base);
- if (dsaf_dev->cpld_base)
- iounmap(dsaf_dev->cpld_base);
- return ret;
-}
-
-static void hns_dsaf_free_cfg(struct dsaf_device *dsaf_dev)
-{
- if (dsaf_dev->io_base)
- iounmap(dsaf_dev->io_base);
-
- if (dsaf_dev->ppe_base)
- iounmap(dsaf_dev->ppe_base);
-
- if (dsaf_dev->sds_base)
- iounmap(dsaf_dev->sds_base);
-
- if (dsaf_dev->sc_base)
- iounmap(dsaf_dev->sc_base);
-
- if (dsaf_dev->cpld_base)
- iounmap(dsaf_dev->cpld_base);
}
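
Every mapping above is now devm-managed, which is why the unmap_base_addr unwind label and hns_dsaf_free_cfg() can be deleted outright: the managed-resource core unmaps everything when probe fails or the device is unbound. The idiom the hunk leans on:

	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* no iounmap() on any exit path */
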
/**
@@ -217,9 +261,7 @@ static void hns_dsaf_mix_def_qid_cfg(struct dsaf_device *dsaf_dev)
u32 q_id, q_num_per_port;
u32 i;
- hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode,
- HNS_DSAF_COMM_SERVICE_NW_IDX,
- &max_vfn, &max_q_per_vf);
+ hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode, &max_vfn, &max_q_per_vf);
q_num_per_port = max_vfn * max_q_per_vf;
for (i = 0, q_id = 0; i < DSAF_SERVICE_NW_NUM; i++) {
@@ -239,9 +281,7 @@ static void hns_dsaf_inner_qid_cfg(struct dsaf_device *dsaf_dev)
if (AE_IS_VER1(dsaf_dev->dsaf_ver))
return;
- hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode,
- HNS_DSAF_COMM_SERVICE_NW_IDX,
- &max_vfn, &max_q_per_vf);
+ hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode, &max_vfn, &max_q_per_vf);
q_num_per_port = max_vfn * max_q_per_vf;
for (mac_id = 0, q_id = 0; mac_id < DSAF_SERVICE_NW_NUM; mac_id++) {
@@ -477,10 +517,10 @@ static void hns_dsafv2_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev)
o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
dsaf_set_field(o_sbm_bp_cfg,
DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_M,
- DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 110);
+ DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 48);
dsaf_set_field(o_sbm_bp_cfg,
DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M,
- DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 160);
+ DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 80);
dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
/* for no enable pfc mode */
@@ -488,29 +528,39 @@ static void hns_dsafv2_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev)
o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
dsaf_set_field(o_sbm_bp_cfg,
DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_M,
- DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_S, 128);
+ DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_S, 192);
dsaf_set_field(o_sbm_bp_cfg,
DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_M,
- DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S, 192);
+ DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S, 240);
dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
}
/* PPE */
- reg = DSAF_SBM_BP_CFG_2_PPE_REG_0_REG + 0x80 * i;
- o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
- dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_SET_BUF_NUM_M,
- DSAFV2_SBM_CFG2_SET_BUF_NUM_S, 10);
- dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_RESET_BUF_NUM_M,
- DSAFV2_SBM_CFG2_RESET_BUF_NUM_S, 12);
- dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
+ for (i = 0; i < DSAFV2_SBM_PPE_CHN; i++) {
+ reg = DSAF_SBM_BP_CFG_2_PPE_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAFV2_SBM_CFG2_PPE_SET_BUF_NUM_M,
+ DSAFV2_SBM_CFG2_PPE_SET_BUF_NUM_S, 2);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAFV2_SBM_CFG2_PPE_RESET_BUF_NUM_M,
+ DSAFV2_SBM_CFG2_PPE_RESET_BUF_NUM_S, 3);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAFV2_SBM_CFG2_PPE_CFG_USEFUL_NUM_M,
+ DSAFV2_SBM_CFG2_PPE_CFG_USEFUL_NUM_S, 52);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
+ }
+
/* RoCEE */
for (i = 0; i < DASFV2_ROCEE_CRD_NUM; i++) {
reg = DSAFV2_SBM_BP_CFG_2_ROCEE_REG_0_REG + 0x80 * i;
o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
- dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_SET_BUF_NUM_M,
- DSAFV2_SBM_CFG2_SET_BUF_NUM_S, 2);
- dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_RESET_BUF_NUM_M,
- DSAFV2_SBM_CFG2_RESET_BUF_NUM_S, 4);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_M,
+ DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_S, 2);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAFV2_SBM_CFG2_ROCEE_RESET_BUF_NUM_M,
+ DSAFV2_SBM_CFG2_ROCEE_RESET_BUF_NUM_S, 4);
dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
}
}
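
The re-tuned SBM watermarks above are all read-modify-write updates of packed register fields; dsaf_set_field() is presumably the usual mask/shift helper, roughly:

	/* presumed shape of dsaf_set_field(): clear the field, then or in
	 * the new value, leaving every other bit of the register intact
	 */
	#define dsaf_set_field(origin, mask, shift, val)		\
		do {							\
			(origin) &= ~(mask);				\
			(origin) |= (((val) << (shift)) & (mask));	\
		} while (0)

So each watermark change costs exactly one register read and one write, no matter how many fields the hunk touches.
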
@@ -712,13 +762,15 @@ static void hns_dsaf_tbl_tcam_data_ucast_pul(
void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en)
{
- dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG, DSAF_CFG_MIX_MODE_S, !!en);
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev))
+ dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG,
+ DSAF_CFG_MIX_MODE_S, !!en);
}
void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en)
{
if (AE_IS_VER1(dsaf_dev->dsaf_ver) ||
- dsaf_dev->mac_cb[mac_id].mac_type == HNAE_PORT_DEBUG)
+ dsaf_dev->mac_cb[mac_id]->mac_type == HNAE_PORT_DEBUG)
return;
dsaf_set_dev_bit(dsaf_dev, DSAFV2_SERDES_LBK_0_REG + 4 * mac_id,
@@ -819,6 +871,8 @@ static void hns_dsaf_single_line_tbl_cfg(
struct dsaf_device *dsaf_dev,
u32 address, struct dsaf_tbl_line_cfg *ptbl_line)
{
+ spin_lock_bh(&dsaf_dev->tcam_lock);
+
/*Write Addr*/
hns_dsaf_tbl_line_addr_cfg(dsaf_dev, address);
@@ -827,6 +881,8 @@ static void hns_dsaf_single_line_tbl_cfg(
/*Write Plus*/
hns_dsaf_tbl_line_pul(dsaf_dev);
+
+ spin_unlock_bh(&dsaf_dev->tcam_lock);
}
/**
@@ -840,6 +896,8 @@ static void hns_dsaf_tcam_uc_cfg(
struct dsaf_tbl_tcam_data *ptbl_tcam_data,
struct dsaf_tbl_tcam_ucast_cfg *ptbl_tcam_ucast)
{
+ spin_lock_bh(&dsaf_dev->tcam_lock);
+
/*Write Addr*/
hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
/*Write Tcam Data*/
@@ -848,6 +906,8 @@ static void hns_dsaf_tcam_uc_cfg(
hns_dsaf_tbl_tcam_ucast_cfg(dsaf_dev, ptbl_tcam_ucast);
/*Write Plus*/
hns_dsaf_tbl_tcam_data_ucast_pul(dsaf_dev);
+
+ spin_unlock_bh(&dsaf_dev->tcam_lock);
}
/**
@@ -862,6 +922,8 @@ static void hns_dsaf_tcam_mc_cfg(
struct dsaf_tbl_tcam_data *ptbl_tcam_data,
struct dsaf_tbl_tcam_mcast_cfg *ptbl_tcam_mcast)
{
+ spin_lock_bh(&dsaf_dev->tcam_lock);
+
/*Write Addr*/
hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
/*Write Tcam Data*/
@@ -870,6 +932,8 @@ static void hns_dsaf_tcam_mc_cfg(
hns_dsaf_tbl_tcam_mcast_cfg(dsaf_dev, ptbl_tcam_mcast);
/*Write Plus*/
hns_dsaf_tbl_tcam_data_mcast_pul(dsaf_dev);
+
+ spin_unlock_bh(&dsaf_dev->tcam_lock);
}
/**
@@ -879,6 +943,8 @@ static void hns_dsaf_tcam_mc_cfg(
*/
static void hns_dsaf_tcam_mc_invld(struct dsaf_device *dsaf_dev, u32 address)
{
+ spin_lock_bh(&dsaf_dev->tcam_lock);
+
/*Write Addr*/
hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
@@ -891,6 +957,8 @@ static void hns_dsaf_tcam_mc_invld(struct dsaf_device *dsaf_dev, u32 address)
/*Write Plus*/
hns_dsaf_tbl_tcam_mcast_pul(dsaf_dev);
+
+ spin_unlock_bh(&dsaf_dev->tcam_lock);
}
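
The TCAM is programmed indirectly: software writes the entry address, stages the data, then fires a "pulse" register to commit, so two concurrent callers would interleave and corrupt each other's entry. The new tcam_lock serializes every such sequence (the _bh flavour suggests the tables can also be reached from softirq context). The protected pattern, roughly, with the helper names used in the hunks above:

	spin_lock_bh(&dsaf_dev->tcam_lock);
	hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);	/* 1. select entry  */
	hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, data);	/* 2. stage payload */
	hns_dsaf_tbl_tcam_data_ucast_pul(dsaf_dev);	/* 3. commit pulse  */
	spin_unlock_bh(&dsaf_dev->tcam_lock);
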
/**
@@ -908,6 +976,8 @@ static void hns_dsaf_tcam_uc_get(
u32 tcam_read_data0;
u32 tcam_read_data4;
+ spin_lock_bh(&dsaf_dev->tcam_lock);
+
/*Write Addr*/
hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
@@ -916,9 +986,9 @@ static void hns_dsaf_tcam_uc_get(
/*read tcam data*/
ptbl_tcam_data->tbl_tcam_data_high
- = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
- ptbl_tcam_data->tbl_tcam_data_low
= dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
+ ptbl_tcam_data->tbl_tcam_data_low
+ = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
/*read tcam mcast*/
tcam_read_data0 = dsaf_read_dev(dsaf_dev,
@@ -940,6 +1010,8 @@ static void hns_dsaf_tcam_uc_get(
DSAF_TBL_UCAST_CFG1_OUT_PORT_S);
ptbl_tcam_ucast->tbl_ucast_dvc
= dsaf_get_bit(tcam_read_data0, DSAF_TBL_UCAST_CFG1_DVC_S);
+
+ spin_unlock_bh(&dsaf_dev->tcam_lock);
}
/**
@@ -956,6 +1028,8 @@ static void hns_dsaf_tcam_mc_get(
{
u32 data_tmp;
+ spin_lock_bh(&dsaf_dev->tcam_lock);
+
/*Write Addr*/
hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
@@ -964,9 +1038,9 @@ static void hns_dsaf_tcam_mc_get(
/*read tcam data*/
ptbl_tcam_data->tbl_tcam_data_high =
- dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
- ptbl_tcam_data->tbl_tcam_data_low =
dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
+ ptbl_tcam_data->tbl_tcam_data_low =
+ dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
/*read tcam mcast*/
ptbl_tcam_mcast->tbl_mcast_port_msk[0] =
@@ -986,6 +1060,8 @@ static void hns_dsaf_tcam_mc_get(
ptbl_tcam_mcast->tbl_mcast_port_msk[4] =
dsaf_get_field(data_tmp, DSAF_TBL_MCAST_CFG4_VM128_112_M,
DSAF_TBL_MCAST_CFG4_VM128_112_S);
+
+ spin_unlock_bh(&dsaf_dev->tcam_lock);
}
/**
@@ -1022,12 +1098,52 @@ static void hns_dsaf_tbl_tcam_init(struct dsaf_device *dsaf_dev)
* @mac_cb: mac control block
*/
static void hns_dsaf_pfc_en_cfg(struct dsaf_device *dsaf_dev,
- int mac_id, int en)
+ int mac_id, int tc_en)
+{
+ dsaf_write_dev(dsaf_dev, DSAF_PFC_EN_0_REG + mac_id * 4, tc_en);
+}
+
+static void hns_dsaf_set_pfc_pause(struct dsaf_device *dsaf_dev,
+ int mac_id, int tx_en, int rx_en)
+{
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
+ if (!tx_en || !rx_en)
+ dev_err(dsaf_dev->dev, "dsaf v1 can not close pfc!\n");
+
+ return;
+ }
+
+ dsaf_set_dev_bit(dsaf_dev, DSAF_PAUSE_CFG_REG + mac_id * 4,
+ DSAF_PFC_PAUSE_RX_EN_B, !!rx_en);
+ dsaf_set_dev_bit(dsaf_dev, DSAF_PAUSE_CFG_REG + mac_id * 4,
+ DSAF_PFC_PAUSE_TX_EN_B, !!tx_en);
+}
+
+int hns_dsaf_set_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
+ u32 en)
+{
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
+ if (!en) {
+ dev_err(dsaf_dev->dev, "dsafv1 can't close rx_pause!\n");
+ return -EINVAL;
+ }
+ }
+
+ dsaf_set_dev_bit(dsaf_dev, DSAF_PAUSE_CFG_REG + mac_id * 4,
+ DSAF_MAC_PAUSE_RX_EN_B, !!en);
+
+ return 0;
+}
+
+void hns_dsaf_get_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
+ u32 *en)
{
- if (!en)
- dsaf_write_dev(dsaf_dev, DSAF_PFC_EN_0_REG + mac_id * 4, 0);
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver))
+ *en = 1;
else
- dsaf_write_dev(dsaf_dev, DSAF_PFC_EN_0_REG + mac_id * 4, 0xff);
+ *en = dsaf_get_dev_bit(dsaf_dev,
+ DSAF_PAUSE_CFG_REG + mac_id * 4,
+ DSAF_MAC_PAUSE_RX_EN_B);
}
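
The new helpers split MAC-level rx pause from per-TC PFC and preserve the v1 restriction that rx pause can never be switched off. An illustrative caller (not from the driver):

	u32 rx_en;

	hns_dsaf_get_rx_mac_pause_en(dsaf_dev, mac_id, &rx_en);
	if (!rx_en && hns_dsaf_set_rx_mac_pause_en(dsaf_dev, mac_id, 1))
		dev_warn(dsaf_dev->dev, "could not enable rx pause\n");
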
/**
@@ -1039,6 +1155,7 @@ static void hns_dsaf_comm_init(struct dsaf_device *dsaf_dev)
{
u32 i;
u32 o_dsaf_cfg;
+ bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver);
o_dsaf_cfg = dsaf_read_dev(dsaf_dev, DSAF_CFG_0_REG);
dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_EN_S, dsaf_dev->dsaf_en);
@@ -1064,8 +1181,10 @@ static void hns_dsaf_comm_init(struct dsaf_device *dsaf_dev)
hns_dsaf_sw_port_type_cfg(dsaf_dev, DSAF_SW_PORT_TYPE_NON_VLAN);
/* set dsaf pfc to 0 for parsing rx pause */
- for (i = 0; i < DSAF_COMM_CHN; i++)
+ for (i = 0; i < DSAF_COMM_CHN; i++) {
hns_dsaf_pfc_en_cfg(dsaf_dev, i, 0);
+ hns_dsaf_set_pfc_pause(dsaf_dev, i, is_ver1, is_ver1);
+ }
/*msk and clr exception irqs */
for (i = 0; i < DSAF_COMM_CHN; i++) {
@@ -1219,9 +1338,9 @@ static int hns_dsaf_init_hw(struct dsaf_device *dsaf_dev)
dev_dbg(dsaf_dev->dev,
"hns_dsaf_init_hw begin %s !\n", dsaf_dev->ae_dev.name);
- hns_dsaf_rst(dsaf_dev, 0);
+ dsaf_dev->misc_op->dsaf_reset(dsaf_dev, 0);
mdelay(10);
- hns_dsaf_rst(dsaf_dev, 1);
+ dsaf_dev->misc_op->dsaf_reset(dsaf_dev, 1);
hns_dsaf_comm_init(dsaf_dev);
@@ -1249,7 +1368,7 @@ static int hns_dsaf_init_hw(struct dsaf_device *dsaf_dev)
static void hns_dsaf_remove_hw(struct dsaf_device *dsaf_dev)
{
/*reset*/
- hns_dsaf_rst(dsaf_dev, 0);
+ dsaf_dev->misc_op->dsaf_reset(dsaf_dev, 0);
}
/**
@@ -1264,6 +1383,10 @@ static int hns_dsaf_init(struct dsaf_device *dsaf_dev)
u32 i;
int ret;
+ if (HNS_DSAF_IS_DEBUG(dsaf_dev))
+ return 0;
+
+ spin_lock_init(&dsaf_dev->tcam_lock);
ret = hns_dsaf_init_hw(dsaf_dev);
if (ret)
return ret;
@@ -2009,10 +2132,25 @@ void hns_dsaf_fix_mac_mode(struct hns_mac_cb *mac_cb)
hns_dsaf_port_work_rate_cfg(dsaf_dev, mac_id, mode);
}
+static u32 hns_dsaf_get_inode_prio_reg(int index)
+{
+ int base_index, offset;
+ u32 base_addr = DSAF_INODE_IN_PRIO_PAUSE_BASE_REG;
+
+ base_index = (index + 1) / DSAF_REG_PER_ZONE;
+ offset = (index + 1) % DSAF_REG_PER_ZONE;
+
+ return base_addr + DSAF_INODE_IN_PRIO_PAUSE_BASE_OFFSET * base_index +
+ DSAF_INODE_IN_PRIO_PAUSE_OFFSET * offset;
+}
+
void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num)
{
struct dsaf_hw_stats *hw_stats
= &dsaf_dev->hw_stats[node_num];
+ bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver);
+ int i;
+ u32 reg_tmp;
hw_stats->pad_drop += dsaf_read_dev(dsaf_dev,
DSAF_INODE_PAD_DISCARD_NUM_0_REG + 0x80 * (u64)node_num);
@@ -2022,8 +2160,12 @@ void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num)
DSAF_INODE_FINAL_IN_PKT_NUM_0_REG + 0x80 * (u64)node_num);
hw_stats->rx_pkt_id += dsaf_read_dev(dsaf_dev,
DSAF_INODE_SBM_PID_NUM_0_REG + 0x80 * (u64)node_num);
- hw_stats->rx_pause_frame += dsaf_read_dev(dsaf_dev,
- DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG + 0x80 * (u64)node_num);
+
+ reg_tmp = is_ver1 ? DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG :
+ DSAFV2_INODE_FINAL_IN_PAUSE_NUM_0_REG;
+ hw_stats->rx_pause_frame +=
+ dsaf_read_dev(dsaf_dev, reg_tmp + 0x80 * (u64)node_num);
+
hw_stats->release_buf_num += dsaf_read_dev(dsaf_dev,
DSAF_INODE_SBM_RELS_NUM_0_REG + 0x80 * (u64)node_num);
hw_stats->sbm_drop += dsaf_read_dev(dsaf_dev,
@@ -2042,6 +2184,18 @@ void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num)
hw_stats->stp_drop += dsaf_read_dev(dsaf_dev,
DSAF_INODE_IN_DATA_STP_DISC_0_REG + 0x80 * (u64)node_num);
+ /* pfc pause frame statistics stored in dsaf inode*/
+ if ((node_num < DSAF_SERVICE_NW_NUM) && !is_ver1) {
+ for (i = 0; i < DSAF_PRIO_NR; i++) {
+ reg_tmp = hns_dsaf_get_inode_prio_reg(i);
+ hw_stats->rx_pfc[i] += dsaf_read_dev(dsaf_dev,
+ reg_tmp + 0x4 * (u64)node_num);
+ hw_stats->tx_pfc[i] += dsaf_read_dev(dsaf_dev,
+ DSAF_XOD_XGE_PFC_PRIO_CNT_BASE_REG +
+ DSAF_XOD_XGE_PFC_PRIO_CNT_OFFSET * i +
+ 0xF0 * (u64)node_num);
+ }
+ }
hw_stats->tx_pkts += dsaf_read_dev(dsaf_dev,
DSAF_XOD_RCVPKT_CNT_0_REG + 0x90 * (u64)node_num);
}
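
hns_dsaf_get_inode_prio_reg() folds the +1 index bias and the three-registers-per-zone layout into a single address computation. A worked example (the two offset constants come from hns_dsaf_reg.h):

	/* index 4: base_index = (4 + 1) / 3 = 1, offset = (4 + 1) % 3 = 2,
	 * so the rx counter sits at
	 *   DSAF_INODE_IN_PRIO_PAUSE_BASE_REG
	 *     + 1 * DSAF_INODE_IN_PRIO_PAUSE_BASE_OFFSET
	 *     + 2 * DSAF_INODE_IN_PRIO_PAUSE_OFFSET
	 * and the stats loop then adds a 0x4 stride per inode.
	 */
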
@@ -2056,6 +2210,8 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
u32 i = 0;
u32 j;
u32 *p = data;
+ u32 reg_tmp;
+ bool is_ver1 = AE_IS_VER1(ddev->dsaf_ver);
/* dsaf common registers */
p[0] = dsaf_read_dev(ddev, DSAF_SRAM_INIT_OVER_0_REG);
@@ -2120,8 +2276,9 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
DSAF_INODE_FINAL_IN_PKT_NUM_0_REG + j * 0x80);
p[190 + i] = dsaf_read_dev(ddev,
DSAF_INODE_SBM_PID_NUM_0_REG + j * 0x80);
- p[193 + i] = dsaf_read_dev(ddev,
- DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG + j * 0x80);
+ reg_tmp = is_ver1 ? DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG :
+ DSAFV2_INODE_FINAL_IN_PAUSE_NUM_0_REG;
+ p[193 + i] = dsaf_read_dev(ddev, reg_tmp + j * 0x80);
p[196 + i] = dsaf_read_dev(ddev,
DSAF_INODE_SBM_RELS_NUM_0_REG + j * 0x80);
p[199 + i] = dsaf_read_dev(ddev,
@@ -2368,43 +2525,61 @@ void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
p[496] = dsaf_read_dev(ddev, DSAF_NETPORT_CTRL_SIG_0_REG + port * 0x4);
p[497] = dsaf_read_dev(ddev, DSAF_XGE_CTRL_SIG_CFG_0_REG + port * 0x4);
+ if (!is_ver1)
+ p[498] = dsaf_read_dev(ddev, DSAF_PAUSE_CFG_REG + port * 0x4);
+
/* mark end of dsaf regs */
- for (i = 498; i < 504; i++)
+ for (i = 499; i < 504; i++)
p[i] = 0xdddddddd;
}
-static char *hns_dsaf_get_node_stats_strings(char *data, int node)
+static char *hns_dsaf_get_node_stats_strings(char *data, int node,
+ struct dsaf_device *dsaf_dev)
{
char *buff = data;
+ int i;
+ bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver);
snprintf(buff, ETH_GSTRING_LEN, "innod%d_pad_drop_pkts", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
snprintf(buff, ETH_GSTRING_LEN, "innod%d_manage_pkts", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pkts", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pkt_id", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pause_frame", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
snprintf(buff, ETH_GSTRING_LEN, "innod%d_release_buf_num", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
snprintf(buff, ETH_GSTRING_LEN, "innod%d_sbm_drop_pkts", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
snprintf(buff, ETH_GSTRING_LEN, "innod%d_crc_false_pkts", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
snprintf(buff, ETH_GSTRING_LEN, "innod%d_bp_drop_pkts", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
snprintf(buff, ETH_GSTRING_LEN, "innod%d_lookup_rslt_drop_pkts", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
snprintf(buff, ETH_GSTRING_LEN, "innod%d_local_rslt_fail_pkts", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
snprintf(buff, ETH_GSTRING_LEN, "innod%d_vlan_drop_pkts", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
snprintf(buff, ETH_GSTRING_LEN, "innod%d_stp_drop_pkts", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
+ if (node < DSAF_SERVICE_NW_NUM && !is_ver1) {
+ for (i = 0; i < DSAF_PRIO_NR; i++) {
+ snprintf(buff + 0 * ETH_GSTRING_LEN * DSAF_PRIO_NR,
+ ETH_GSTRING_LEN, "inod%d_pfc_prio%d_pkts",
+ node, i);
+ snprintf(buff + 1 * ETH_GSTRING_LEN * DSAF_PRIO_NR,
+ ETH_GSTRING_LEN, "onod%d_pfc_prio%d_pkts",
+ node, i);
+ buff += ETH_GSTRING_LEN;
+ }
+ buff += 1 * DSAF_PRIO_NR * ETH_GSTRING_LEN;
+ }
snprintf(buff, ETH_GSTRING_LEN, "onnod%d_tx_pkts", node);
- buff = buff + ETH_GSTRING_LEN;
+ buff += ETH_GSTRING_LEN;
return buff;
}
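
In the PFC branch above each iteration writes the rx name into the current slot and the paired tx name DSAF_PRIO_NR slots ahead, then advances one slot; the trailing buff += DSAF_PRIO_NR * ETH_GSTRING_LEN jumps over the tx block the loop has already filled. The resulting layout for DSAF_PRIO_NR == 8 (sketch):

	/* one v2 service node, relative to the start of the pfc block:
	 *   slot  0 ..  7 : inod<n>_pfc_prio0..7_pkts   (rx counters)
	 *   slot  8 .. 15 : onod<n>_pfc_prio0..7_pkts   (tx counters)
	 * followed by onnod<n>_tx_pkts
	 */

This matches the p[13..28] PFC block and p[29] tx counter in hns_dsaf_get_node_stats() below.
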
@@ -2413,7 +2588,9 @@ static u64 *hns_dsaf_get_node_stats(struct dsaf_device *ddev, u64 *data,
int node_num)
{
u64 *p = data;
+ int i;
struct dsaf_hw_stats *hw_stats = &ddev->hw_stats[node_num];
+ bool is_ver1 = AE_IS_VER1(ddev->dsaf_ver);
p[0] = hw_stats->pad_drop;
p[1] = hw_stats->man_pkts;
@@ -2428,8 +2605,16 @@ static u64 *hns_dsaf_get_node_stats(struct dsaf_device *ddev, u64 *data,
p[10] = hw_stats->local_addr_false;
p[11] = hw_stats->vlan_drop;
p[12] = hw_stats->stp_drop;
- p[13] = hw_stats->tx_pkts;
+ if (node_num < DSAF_SERVICE_NW_NUM && !is_ver1) {
+ for (i = 0; i < DSAF_PRIO_NR; i++) {
+ p[13 + i + 0 * DSAF_PRIO_NR] = hw_stats->rx_pfc[i];
+ p[13 + i + 1 * DSAF_PRIO_NR] = hw_stats->tx_pfc[i];
+ }
+ p[29] = hw_stats->tx_pkts;
+ return &p[30];
+ }
+ p[13] = hw_stats->tx_pkts;
return &p[14];
}
@@ -2457,11 +2642,16 @@ void hns_dsaf_get_stats(struct dsaf_device *ddev, u64 *data, int port)
*@stringset: type of values in data
*return dsaf string name count
*/
-int hns_dsaf_get_sset_count(int stringset)
+int hns_dsaf_get_sset_count(struct dsaf_device *dsaf_dev, int stringset)
{
- if (stringset == ETH_SS_STATS)
- return DSAF_STATIC_NUM;
+ bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver);
+ if (stringset == ETH_SS_STATS) {
+ if (is_ver1)
+ return DSAF_STATIC_NUM;
+ else
+ return DSAF_V2_STATIC_NUM;
+ }
return 0;
}
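
The two totals follow from hns_dsaf_get_node_stats(): a plain node contributes 14 counters (p[0]..p[13]), and hns_dsaf_get_stats() dumps one ge/xge node plus one ppe node, so v1 reports 2 * 14 = 28 (DSAF_STATIC_NUM). On v2 the service node gains 2 * DSAF_PRIO_NR PFC counters for a total of 30, while the ppe node (indexed past DSAF_SERVICE_NW_NUM) stays at 14, giving 30 + 14 = 44 (DSAF_V2_STATIC_NUM).
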
@@ -2471,7 +2661,8 @@ int hns_dsaf_get_sset_count(int stringset)
*@data:strings name value
*@port:port index
*/
-void hns_dsaf_get_strings(int stringset, u8 *data, int port)
+void hns_dsaf_get_strings(int stringset, u8 *data, int port,
+ struct dsaf_device *dsaf_dev)
{
char *buff = (char *)data;
int node = port;
@@ -2480,11 +2671,11 @@ void hns_dsaf_get_strings(int stringset, u8 *data, int port)
return;
/* for ge/xge node info */
- buff = hns_dsaf_get_node_stats_strings(buff, node);
+ buff = hns_dsaf_get_node_stats_strings(buff, node, dsaf_dev);
/* for ppe node info */
node = port + DSAF_PPE_INODE_BASE;
- (void)hns_dsaf_get_node_stats_strings(buff, node);
+ (void)hns_dsaf_get_node_stats_strings(buff, node, dsaf_dev);
}
/**
@@ -2520,7 +2711,7 @@ static int hns_dsaf_probe(struct platform_device *pdev)
ret = hns_dsaf_init(dsaf_dev);
if (ret)
- goto free_cfg;
+ goto free_dev;
ret = hns_mac_init(dsaf_dev);
if (ret)
@@ -2545,9 +2736,6 @@ uninit_mac:
uninit_dsaf:
hns_dsaf_free(dsaf_dev);
-free_cfg:
- hns_dsaf_free_cfg(dsaf_dev);
-
free_dev:
hns_dsaf_free_dev(dsaf_dev);
@@ -2570,8 +2758,6 @@ static int hns_dsaf_remove(struct platform_device *pdev)
hns_dsaf_free(dsaf_dev);
- hns_dsaf_free_cfg(dsaf_dev);
-
hns_dsaf_free_dev(dsaf_dev);
return 0;
@@ -2589,6 +2775,7 @@ static struct platform_driver g_dsaf_driver = {
.driver = {
.name = DSAF_DRV_NAME,
.of_match_table = g_dsaf_match,
+ .acpi_match_table = hns_dsaf_acpi_match,
},
};
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index 5fea226efaf3..1daf018d9071 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -39,8 +39,12 @@ struct hns_mac_cb;
#define DSAF_DUMP_REGS_NUM 504
#define DSAF_STATIC_NUM 28
+#define DSAF_V2_STATIC_NUM 44
+#define DSAF_PRIO_NR 8
+#define DSAF_REG_PER_ZONE 3
#define DSAF_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
+#define HNS_DSAF_IS_DEBUG(dev) ((dev)->dsaf_mode == DSAF_MODE_DISABLE_SP)
enum hal_dsaf_mode {
HRD_DSAF_NO_DSAF_MODE = 0x0,
@@ -117,6 +121,7 @@ enum dsaf_mode {
DSAF_MODE_ENABLE_32VM, /**< en DSAF-mode, support 32 VM */
DSAF_MODE_ENABLE_128VM, /**< en DSAF-mode, support 128 VM */
DSAF_MODE_ENABLE, /**< before is enable DSAF mode*/
+ DSAF_MODE_DISABLE_SP, /**< non-dsaf, single port mode */
DSAF_MODE_DISABLE_FIX, /**< non-dasf, fixed to queue*/
DSAF_MODE_DISABLE_2PORT_8VM, /**< non-dasf, 2port 8VM */
DSAF_MODE_DISABLE_2PORT_16VM, /**< non-dasf, 2port 16VM */
@@ -174,6 +179,8 @@ struct dsaf_hw_stats {
u64 local_addr_false;
u64 vlan_drop;
u64 stp_drop;
+ u64 rx_pfc[DSAF_PRIO_NR];
+ u64 tx_pfc[DSAF_PRIO_NR];
u64 tx_pkts;
};
@@ -266,6 +273,27 @@ struct dsaf_int_stat {
};
+struct dsaf_misc_op {
+ void (*cpld_set_led)(struct hns_mac_cb *mac_cb, int link_status,
+ u16 speed, int data);
+ void (*cpld_reset_led)(struct hns_mac_cb *mac_cb);
+ int (*cpld_set_led_id)(struct hns_mac_cb *mac_cb,
+ enum hnae_led_state status);
+ /* reset series functions; the block is held in reset while dereset is 0 */
+ void (*dsaf_reset)(struct dsaf_device *dsaf_dev, bool dereset);
+ void (*xge_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
+ void (*xge_core_srst)(struct dsaf_device *dsaf_dev, u32 port,
+ bool dereset);
+ void (*ge_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
+ void (*ppe_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
+ void (*ppe_comm_srst)(struct dsaf_device *dsaf_dev, bool dereset);
+
+ phy_interface_t (*get_phy_if)(struct hns_mac_cb *mac_cb);
+ int (*get_sfp_prsnt)(struct hns_mac_cb *mac_cb, int *sfp_prsnt);
+
+ int (*cfg_serdes_loopback)(struct hns_mac_cb *mac_cb, bool en);
+};
+
/* Dsaf device struct define ,and mac -> dsaf */
struct dsaf_device {
struct device *dev;
@@ -275,10 +303,12 @@ struct dsaf_device {
u8 __iomem *sds_base;
u8 __iomem *ppe_base;
u8 __iomem *io_base;
- u8 __iomem *cpld_base;
+ struct regmap *sub_ctrl;
+ phys_addr_t ppe_paddr;
u32 desc_num; /* desc num per queue*/
u32 buf_size; /* ring buffer size */
+ u32 reset_offset; /* reset field offset in sub sysctrl */
int buf_size_type; /* ring buffer size-type */
enum dsaf_mode dsaf_mode; /* dsaf mode */
enum hal_dsaf_mode dsaf_en;
@@ -287,10 +317,13 @@ struct dsaf_device {
struct ppe_common_cb *ppe_common[DSAF_COMM_DEV_NUM];
struct rcb_common_cb *rcb_common[DSAF_COMM_DEV_NUM];
- struct hns_mac_cb *mac_cb;
+ struct hns_mac_cb *mac_cb[DSAF_MAX_PORT_NUM];
+ struct dsaf_misc_op *misc_op;
struct dsaf_hw_stats hw_stats[DSAF_NODE_NUM];
struct dsaf_int_stat int_stat;
+ /* spinlock serializing tcam table configuration */
+ spinlock_t tcam_lock;
};
static inline void *hns_dsaf_dev_priv(const struct dsaf_device *dsaf_dev)
@@ -359,14 +392,6 @@ static inline void hns_dsaf_tbl_line_addr_cfg(struct dsaf_device *dsaf_dev,
tab_line_addr);
}
-static inline int hns_dsaf_get_comm_idx_by_port(int port)
-{
- if ((port < DSAF_COMM_CHN) || (port == DSAF_MAX_PORT_NUM_PER_CHIP))
- return 0;
- else
- return (port - DSAF_COMM_CHN + 1);
-}
-
static inline struct hnae_vf_cb *hns_ae_get_vf_cb(
struct hnae_handle *handle)
{
@@ -392,31 +417,26 @@ int hns_dsaf_get_mac_entry_by_index(
u16 entry_index,
struct dsaf_drv_mac_multi_dest_entry *mac_entry);
-void hns_dsaf_rst(struct dsaf_device *dsaf_dev, u32 val);
-
-void hns_ppe_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val);
-
-void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val);
-
void hns_dsaf_fix_mac_mode(struct hns_mac_cb *mac_cb);
int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev);
void hns_dsaf_ae_uninit(struct dsaf_device *dsaf_dev);
-void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val);
-void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val);
-void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev,
- u32 port, u32 val);
-
void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 inode_num);
-int hns_dsaf_get_sset_count(int stringset);
+int hns_dsaf_get_sset_count(struct dsaf_device *dsaf_dev, int stringset);
void hns_dsaf_get_stats(struct dsaf_device *ddev, u64 *data, int port);
-void hns_dsaf_get_strings(int stringset, u8 *data, int port);
+void hns_dsaf_get_strings(int stringset, u8 *data, int port,
+ struct dsaf_device *dsaf_dev);
void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data);
int hns_dsaf_get_regs_count(void);
void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en);
+
+void hns_dsaf_get_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
+ u32 *en);
+int hns_dsaf_set_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
+ u32 en);
void hns_dsaf_set_inner_lb(struct dsaf_device *dsaf_dev, u32 mac_id, u32 en);
#endif /* __HNS_DSAF_MAIN_H__ */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index e69b02287c44..611b67b6f450 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -7,13 +7,54 @@
* (at your option) any later version.
*/
-#include "hns_dsaf_misc.h"
#include "hns_dsaf_mac.h"
-#include "hns_dsaf_reg.h"
+#include "hns_dsaf_misc.h"
#include "hns_dsaf_ppe.h"
+#include "hns_dsaf_reg.h"
+
+enum _dsm_op_index {
+ HNS_OP_RESET_FUNC = 0x1,
+ HNS_OP_SERDES_LP_FUNC = 0x2,
+ HNS_OP_LED_SET_FUNC = 0x3,
+ HNS_OP_GET_PORT_TYPE_FUNC = 0x4,
+ HNS_OP_GET_SFP_STAT_FUNC = 0x5,
+};
+
+enum _dsm_rst_type {
+ HNS_DSAF_RESET_FUNC = 0x1,
+ HNS_PPE_RESET_FUNC = 0x2,
+ HNS_XGE_CORE_RESET_FUNC = 0x3,
+ HNS_XGE_RESET_FUNC = 0x4,
+ HNS_GE_RESET_FUNC = 0x5,
+};
+
+const u8 hns_dsaf_acpi_dsm_uuid[] = {
+ 0x1A, 0xAA, 0x85, 0x1A, 0x93, 0xE2, 0x5E, 0x41,
+ 0x8E, 0x28, 0x8D, 0x69, 0x0A, 0x0F, 0x82, 0x0A
+};
+
+static void dsaf_write_sub(struct dsaf_device *dsaf_dev, u32 reg, u32 val)
+{
+ if (dsaf_dev->sub_ctrl)
+ dsaf_write_syscon(dsaf_dev->sub_ctrl, reg, val);
+ else
+ dsaf_write_reg(dsaf_dev->sc_base, reg, val);
+}
+
+static u32 dsaf_read_sub(struct dsaf_device *dsaf_dev, u32 reg)
+{
+ u32 ret;
+
+ if (dsaf_dev->sub_ctrl)
+ ret = dsaf_read_syscon(dsaf_dev->sub_ctrl, reg);
+ else
+ ret = dsaf_read_reg(dsaf_dev->sc_base, reg);
+
+ return ret;
+}
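
dsaf_write_sub()/dsaf_read_sub() hide whether the sub-control space is reached through the syscon regmap (DT with "subctrl-syscon") or a locally mapped MMIO window (legacy DT). The syscon accessors are presumably thin regmap wrappers, along the lines of:

	/* assumed shape of the syscon accessors, not the driver's code */
	static inline void dsaf_write_syscon(struct regmap *base, u32 reg, u32 val)
	{
		regmap_write(base, reg, val);
	}

	static inline u32 dsaf_read_syscon(struct regmap *base, u32 reg)
	{
		u32 val;

		regmap_read(base, reg, &val);
		return val;
	}
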
-void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
- u16 speed, int data)
+static void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
+ u16 speed, int data)
{
int speed_reg = 0;
u8 value;
@@ -22,8 +63,8 @@ void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
pr_err("sfp_led_opt mac_dev is null!\n");
return;
}
- if (!mac_cb->cpld_vaddr) {
- dev_err(mac_cb->dev, "mac_id=%d, cpld_vaddr is null !\n",
+ if (!mac_cb->cpld_ctrl) {
+ dev_err(mac_cb->dev, "mac_id=%d, cpld syscon is null !\n",
mac_cb->mac_id);
return;
}
@@ -40,41 +81,50 @@ void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
dsaf_set_bit(value, DSAF_LED_DATA_B, data);
if (value != mac_cb->cpld_led_value) {
- dsaf_write_b(mac_cb->cpld_vaddr, value);
+ dsaf_write_syscon(mac_cb->cpld_ctrl,
+ mac_cb->cpld_ctrl_reg, value);
mac_cb->cpld_led_value = value;
}
} else {
- dsaf_write_b(mac_cb->cpld_vaddr, CPLD_LED_DEFAULT_VALUE);
- mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE;
+ value = (mac_cb->cpld_led_value) & (0x1 << DSAF_LED_ANCHOR_B);
+ dsaf_write_syscon(mac_cb->cpld_ctrl,
+ mac_cb->cpld_ctrl_reg, value);
+ mac_cb->cpld_led_value = value;
}
}
-void cpld_led_reset(struct hns_mac_cb *mac_cb)
+static void cpld_led_reset(struct hns_mac_cb *mac_cb)
{
- if (!mac_cb || !mac_cb->cpld_vaddr)
+ if (!mac_cb || !mac_cb->cpld_ctrl)
return;
- dsaf_write_b(mac_cb->cpld_vaddr, CPLD_LED_DEFAULT_VALUE);
+ dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
+ CPLD_LED_DEFAULT_VALUE);
mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE;
}
-int cpld_set_led_id(struct hns_mac_cb *mac_cb,
- enum hnae_led_state status)
+static int cpld_set_led_id(struct hns_mac_cb *mac_cb,
+ enum hnae_led_state status)
{
switch (status) {
case HNAE_LED_ACTIVE:
- mac_cb->cpld_led_value = dsaf_read_b(mac_cb->cpld_vaddr);
+ mac_cb->cpld_led_value =
+ dsaf_read_syscon(mac_cb->cpld_ctrl,
+ mac_cb->cpld_ctrl_reg);
dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
CPLD_LED_ON_VALUE);
- dsaf_write_b(mac_cb->cpld_vaddr, mac_cb->cpld_led_value);
- return 2;
+ dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
+ mac_cb->cpld_led_value);
+ break;
case HNAE_LED_INACTIVE:
dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
CPLD_LED_DEFAULT_VALUE);
- dsaf_write_b(mac_cb->cpld_vaddr, mac_cb->cpld_led_value);
+ dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
+ mac_cb->cpld_led_value);
break;
default:
- break;
+ dev_err(mac_cb->dev, "invalid led state: %d!", status);
+ return -EINVAL;
}
return 0;
@@ -82,12 +132,40 @@ int cpld_set_led_id(struct hns_mac_cb *mac_cb,
#define RESET_REQ_OR_DREQ 1
-void hns_dsaf_rst(struct dsaf_device *dsaf_dev, u32 val)
+static void hns_dsaf_acpi_srst_by_port(struct dsaf_device *dsaf_dev, u8 op_type,
+ u32 port_type, u32 port, u32 val)
+{
+ union acpi_object *obj;
+ union acpi_object obj_args[3], argv4;
+
+ obj_args[0].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[0].integer.value = port_type;
+ obj_args[1].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[1].integer.value = port;
+ obj_args[2].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[2].integer.value = val;
+
+ argv4.type = ACPI_TYPE_PACKAGE;
+ argv4.package.count = 3;
+ argv4.package.elements = obj_args;
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(dsaf_dev->dev),
+ hns_dsaf_acpi_dsm_uuid, 0, op_type, &argv4);
+ if (!obj) {
+ dev_warn(dsaf_dev->dev, "reset port_type%d port%d fail!",
+ port_type, port);
+ return;
+ }
+
+ ACPI_FREE(obj);
+}
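
Every ACPI-side reset funnels into this one helper, which packages (port_type, port, assert/de-assert) as a three-integer package and evaluates the _DSM against the device handle; firmware then performs the actual register pokes. For example, taking XGE port 2 out of reset (illustrative call, using the enums defined at the top of the file):

	hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
				   HNS_XGE_RESET_FUNC, 2, 1);
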
+
+static void hns_dsaf_rst(struct dsaf_device *dsaf_dev, bool dereset)
{
u32 xbar_reg_addr;
u32 nt_reg_addr;
- if (!val) {
+ if (!dereset) {
xbar_reg_addr = DSAF_SUB_SC_XBAR_RESET_REQ_REG;
nt_reg_addr = DSAF_SUB_SC_NT_RESET_REQ_REG;
} else {
@@ -95,13 +173,19 @@ void hns_dsaf_rst(struct dsaf_device *dsaf_dev, u32 val)
nt_reg_addr = DSAF_SUB_SC_NT_RESET_DREQ_REG;
}
- dsaf_write_reg(dsaf_dev->sc_base, xbar_reg_addr,
- RESET_REQ_OR_DREQ);
- dsaf_write_reg(dsaf_dev->sc_base, nt_reg_addr,
- RESET_REQ_OR_DREQ);
+ dsaf_write_sub(dsaf_dev, xbar_reg_addr, RESET_REQ_OR_DREQ);
+ dsaf_write_sub(dsaf_dev, nt_reg_addr, RESET_REQ_OR_DREQ);
}
-void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
+static void hns_dsaf_rst_acpi(struct dsaf_device *dsaf_dev, bool dereset)
+{
+ hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
+ HNS_DSAF_RESET_FUNC,
+ 0, dereset);
+}
+
+static void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port,
+ bool dereset)
{
u32 reg_val = 0;
u32 reg_addr;
@@ -110,18 +194,25 @@ void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
return;
reg_val |= RESET_REQ_OR_DREQ;
- reg_val |= 0x2082082 << port;
+ reg_val |= 0x2082082 << dsaf_dev->mac_cb[port]->port_rst_off;
- if (val == 0)
+ if (!dereset)
reg_addr = DSAF_SUB_SC_XGE_RESET_REQ_REG;
else
reg_addr = DSAF_SUB_SC_XGE_RESET_DREQ_REG;
- dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val);
+ dsaf_write_sub(dsaf_dev, reg_addr, reg_val);
+}
+
+static void hns_dsaf_xge_srst_by_port_acpi(struct dsaf_device *dsaf_dev,
+ u32 port, bool dereset)
+{
+ hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
+ HNS_XGE_RESET_FUNC, port, dereset);
}
-void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev,
- u32 port, u32 val)
+static void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev,
+ u32 port, bool dereset)
{
u32 reg_val = 0;
u32 reg_addr;
@@ -129,112 +220,134 @@ void hns_dsaf_xge_core_srst_by_port(struct dsaf_device *dsaf_dev,
if (port >= DSAF_XGE_NUM)
return;
- reg_val |= XGMAC_TRX_CORE_SRST_M << port;
+ reg_val |= XGMAC_TRX_CORE_SRST_M
+ << dsaf_dev->mac_cb[port]->port_rst_off;
- if (val == 0)
+ if (!dereset)
reg_addr = DSAF_SUB_SC_XGE_RESET_REQ_REG;
else
reg_addr = DSAF_SUB_SC_XGE_RESET_DREQ_REG;
- dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val);
+ dsaf_write_sub(dsaf_dev, reg_addr, reg_val);
}
-void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
+static void
+hns_dsaf_xge_core_srst_by_port_acpi(struct dsaf_device *dsaf_dev,
+ u32 port, bool dereset)
+{
+ hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
+ HNS_XGE_CORE_RESET_FUNC, port, dereset);
+}
+
+static void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port,
+ bool dereset)
{
u32 reg_val_1;
u32 reg_val_2;
+ u32 port_rst_off;
if (port >= DSAF_GE_NUM)
return;
- if (port < DSAF_SERVICE_NW_NUM) {
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
reg_val_1 = 0x1 << port;
+ port_rst_off = dsaf_dev->mac_cb[port]->port_rst_off;
/* there is difference between V1 and V2 in register.*/
- if (AE_IS_VER1(dsaf_dev->dsaf_ver))
- reg_val_2 = 0x1041041 << port;
- else
- reg_val_2 = 0x2082082 << port;
+ reg_val_2 = AE_IS_VER1(dsaf_dev->dsaf_ver) ?
+ 0x1041041 : 0x2082082;
+ reg_val_2 <<= port_rst_off;
- if (val == 0) {
- dsaf_write_reg(dsaf_dev->sc_base,
- DSAF_SUB_SC_GE_RESET_REQ1_REG,
+ if (!dereset) {
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ1_REG,
reg_val_1);
- dsaf_write_reg(dsaf_dev->sc_base,
- DSAF_SUB_SC_GE_RESET_REQ0_REG,
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ0_REG,
reg_val_2);
} else {
- dsaf_write_reg(dsaf_dev->sc_base,
- DSAF_SUB_SC_GE_RESET_DREQ0_REG,
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_DREQ0_REG,
reg_val_2);
- dsaf_write_reg(dsaf_dev->sc_base,
- DSAF_SUB_SC_GE_RESET_DREQ1_REG,
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_DREQ1_REG,
reg_val_1);
}
} else {
- reg_val_1 = 0x15540 << (port - 6);
- reg_val_2 = 0x100 << (port - 6);
+ reg_val_1 = 0x15540;
+ reg_val_2 = AE_IS_VER1(dsaf_dev->dsaf_ver) ? 0x100 : 0x40;
+
+ reg_val_1 <<= dsaf_dev->reset_offset;
+ reg_val_2 <<= dsaf_dev->reset_offset;
- if (val == 0) {
- dsaf_write_reg(dsaf_dev->sc_base,
- DSAF_SUB_SC_GE_RESET_REQ1_REG,
+ if (!dereset) {
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ1_REG,
reg_val_1);
- dsaf_write_reg(dsaf_dev->sc_base,
- DSAF_SUB_SC_PPE_RESET_REQ_REG,
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_PPE_RESET_REQ_REG,
reg_val_2);
} else {
- dsaf_write_reg(dsaf_dev->sc_base,
- DSAF_SUB_SC_GE_RESET_DREQ1_REG,
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_DREQ1_REG,
reg_val_1);
- dsaf_write_reg(dsaf_dev->sc_base,
- DSAF_SUB_SC_PPE_RESET_DREQ_REG,
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_PPE_RESET_DREQ_REG,
reg_val_2);
}
}
}
-void hns_ppe_srst_by_port(struct dsaf_device *dsaf_dev, u32 port, u32 val)
+static void hns_dsaf_ge_srst_by_port_acpi(struct dsaf_device *dsaf_dev,
+ u32 port, bool dereset)
+{
+ hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
+ HNS_GE_RESET_FUNC, port, dereset);
+}
+
+static void hns_ppe_srst_by_port(struct dsaf_device *dsaf_dev, u32 port,
+ bool dereset)
{
u32 reg_val = 0;
u32 reg_addr;
- reg_val |= RESET_REQ_OR_DREQ << port;
+ reg_val |= RESET_REQ_OR_DREQ << dsaf_dev->mac_cb[port]->port_rst_off;
- if (val == 0)
+ if (!dereset)
reg_addr = DSAF_SUB_SC_PPE_RESET_REQ_REG;
else
reg_addr = DSAF_SUB_SC_PPE_RESET_DREQ_REG;
- dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val);
+ dsaf_write_sub(dsaf_dev, reg_addr, reg_val);
+}
+
+static void
+hns_ppe_srst_by_port_acpi(struct dsaf_device *dsaf_dev, u32 port, bool dereset)
+{
+ hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
+ HNS_PPE_RESET_FUNC, port, dereset);
}
-void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val)
+static void hns_ppe_com_srst(struct dsaf_device *dsaf_dev, bool dereset)
{
- int comm_index = ppe_common->comm_index;
- struct dsaf_device *dsaf_dev = ppe_common->dsaf_dev;
u32 reg_val;
u32 reg_addr;
- if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
+ if (!dev_of_node(dsaf_dev->dev))
+ return;
+
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
reg_val = RESET_REQ_OR_DREQ;
- if (val == 0)
+ if (!dereset)
reg_addr = DSAF_SUB_SC_RCB_PPE_COM_RESET_REQ_REG;
else
reg_addr = DSAF_SUB_SC_RCB_PPE_COM_RESET_DREQ_REG;
} else {
- reg_val = 0x100 << (comm_index - 1);
+ reg_val = 0x100 << dsaf_dev->reset_offset;
- if (val == 0)
+ if (!dereset)
reg_addr = DSAF_SUB_SC_PPE_RESET_REQ_REG;
else
reg_addr = DSAF_SUB_SC_PPE_RESET_DREQ_REG;
}
- dsaf_write_reg(dsaf_dev->sc_base, reg_addr, reg_val);
+ dsaf_write_sub(dsaf_dev, reg_addr, reg_val);
}
/**
@@ -242,52 +355,86 @@ void hns_ppe_com_srst(struct ppe_common_cb *ppe_common, u32 val)
* @mac_cb: mac control block
* return phy interface
*/
-phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb)
+static phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb)
{
u32 mode;
u32 reg;
- u32 shift;
bool is_ver1 = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver);
- void __iomem *sys_ctl_vaddr = mac_cb->sys_ctl_vaddr;
int mac_id = mac_cb->mac_id;
- phy_interface_t phy_if = PHY_INTERFACE_MODE_NA;
+ phy_interface_t phy_if;
- if (is_ver1 && (mac_id >= 6 && mac_id <= 7)) {
- phy_if = PHY_INTERFACE_MODE_SGMII;
- } else if (mac_id >= 0 && mac_id <= 3) {
- reg = is_ver1 ? HNS_MAC_HILINK4_REG : HNS_MAC_HILINK4V2_REG;
- mode = dsaf_read_reg(sys_ctl_vaddr, reg);
- /* mac_id 0, 1, 2, 3 ---> hilink4 lane 0, 1, 2, 3 */
- shift = is_ver1 ? 0 : mac_id;
- if (dsaf_get_bit(mode, shift))
- phy_if = PHY_INTERFACE_MODE_XGMII;
+ if (is_ver1) {
+ if (HNS_DSAF_IS_DEBUG(mac_cb->dsaf_dev))
+ return PHY_INTERFACE_MODE_SGMII;
+
+ if (mac_id >= 0 && mac_id <= 3)
+ reg = HNS_MAC_HILINK4_REG;
else
- phy_if = PHY_INTERFACE_MODE_SGMII;
- } else if (mac_id >= 4 && mac_id <= 7) {
- reg = is_ver1 ? HNS_MAC_HILINK3_REG : HNS_MAC_HILINK3V2_REG;
- mode = dsaf_read_reg(sys_ctl_vaddr, reg);
- /* mac_id 4, 5, 6, 7 ---> hilink3 lane 2, 3, 0, 1 */
- shift = is_ver1 ? 0 : mac_id <= 5 ? mac_id - 2 : mac_id - 6;
- if (dsaf_get_bit(mode, shift))
- phy_if = PHY_INTERFACE_MODE_XGMII;
+ reg = HNS_MAC_HILINK3_REG;
+ } else {
+ if (!HNS_DSAF_IS_DEBUG(mac_cb->dsaf_dev) && mac_id <= 3)
+ reg = HNS_MAC_HILINK4V2_REG;
else
- phy_if = PHY_INTERFACE_MODE_SGMII;
+ reg = HNS_MAC_HILINK3V2_REG;
}
+
+ mode = dsaf_read_sub(mac_cb->dsaf_dev, reg);
+ if (dsaf_get_bit(mode, mac_cb->port_mode_off))
+ phy_if = PHY_INTERFACE_MODE_XGMII;
+ else
+ phy_if = PHY_INTERFACE_MODE_SGMII;
+
return phy_if;
}
+static phy_interface_t hns_mac_get_phy_if_acpi(struct hns_mac_cb *mac_cb)
+{
+ phy_interface_t phy_if = PHY_INTERFACE_MODE_NA;
+ union acpi_object *obj;
+ union acpi_object obj_args, argv4;
+
+ obj_args.integer.type = ACPI_TYPE_INTEGER;
+ obj_args.integer.value = mac_cb->mac_id;
+
+ argv4.type = ACPI_TYPE_PACKAGE;
+ argv4.package.count = 1;
+ argv4.package.elements = &obj_args;
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
+ hns_dsaf_acpi_dsm_uuid, 0,
+ HNS_OP_GET_PORT_TYPE_FUNC, &argv4);
+
+ if (!obj || obj->type != ACPI_TYPE_INTEGER)
+ return phy_if;
+
+ phy_if = obj->integer.value ?
+ PHY_INTERFACE_MODE_XGMII : PHY_INTERFACE_MODE_SGMII;
+
+ dev_dbg(mac_cb->dev, "mac_id=%d, phy_if=%d\n", mac_cb->mac_id, phy_if);
+
+ ACPI_FREE(obj);
+
+ return phy_if;
+}
+
+int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
+{
+ if (!mac_cb->cpld_ctrl)
+ return -ENODEV;
+
+ *sfp_prsnt = !dsaf_read_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg
+ + MAC_SFP_PORT_OFFSET);
+
+ return 0;
+}
+
/**
* hns_mac_config_sds_loopback - set loop back for serdes
* @mac_cb: mac control block
* return 0 == success
*/
-int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, u8 en)
+static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en)
{
- /* port 0-3 hilink4 base is serdes_vaddr + 0x00280000
- * port 4-7 hilink3 base is serdes_vaddr + 0x00200000
- */
- u8 *base_addr = (u8 *)mac_cb->serdes_vaddr +
- (mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000);
const u8 lane_id[] = {
0, /* mac 0 -> lane 0 */
1, /* mac 1 -> lane 1 */
@@ -304,7 +451,7 @@ int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, u8 en)
int sfp_prsnt;
int ret = hns_mac_get_sfp_prsnt(mac_cb, &sfp_prsnt);
- if (!mac_cb->phy_node) {
+ if (!mac_cb->phy_dev) {
if (ret)
pr_info("please confirm sfp is present or not\n");
else
@@ -312,7 +459,111 @@ int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, u8 en)
pr_info("no sfp in this eth\n");
}
- dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, !!en);
+ if (mac_cb->serdes_ctrl) {
+ u32 origin;
+
+ if (!AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver)) {
+#define HILINK_ACCESS_SEL_CFG 0x40008
+ /* hilink4 & hilink3 use the same xge training and
+ * xge u adaptor. There is a hilink access sel cfg
+ * register to select which one to be configed
+ */
+ if ((!HNS_DSAF_IS_DEBUG(mac_cb->dsaf_dev)) &&
+ (mac_cb->mac_id <= 3))
+ dsaf_write_syscon(mac_cb->serdes_ctrl,
+ HILINK_ACCESS_SEL_CFG, 0);
+ else
+ dsaf_write_syscon(mac_cb->serdes_ctrl,
+ HILINK_ACCESS_SEL_CFG, 3);
+ }
+
+ origin = dsaf_read_syscon(mac_cb->serdes_ctrl, reg_offset);
+
+ dsaf_set_field(origin, 1ull << 10, 10, en);
+ dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin);
+ } else {
+ u8 *base_addr = (u8 *)mac_cb->serdes_vaddr +
+ (mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000);
+ dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, en);
+ }
return 0;
}
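
When a serdes-ctrl syscon is present, bit 10 of the lane register is flipped through the regmap rather than by poking the ioremapped hilink window; the dsaf_set_field() + dsaf_write_syscon() pair above boils down to:

	u32 v = dsaf_read_syscon(mac_cb->serdes_ctrl, reg_offset);

	v &= ~BIT(10);
	v |= (u32)en << 10;
	dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, v);

On v2 the HILINK_ACCESS_SEL_CFG write comes first because hilink3 and hilink4 share one training/adaptor block, so the mux must be steered at the lane being configured before the loopback bit is touched.
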
+
+static int
+hns_mac_config_sds_loopback_acpi(struct hns_mac_cb *mac_cb, bool en)
+{
+ union acpi_object *obj;
+ union acpi_object obj_args[3], argv4;
+
+ obj_args[0].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[0].integer.value = mac_cb->mac_id;
+ obj_args[1].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[1].integer.value = !!en;
+
+ argv4.type = ACPI_TYPE_PACKAGE;
+ argv4.package.count = 2;
+ argv4.package.elements = obj_args;
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dsaf_dev->dev),
+ hns_dsaf_acpi_dsm_uuid, 0,
+ HNS_OP_SERDES_LP_FUNC, &argv4);
+ if (!obj) {
+ dev_warn(mac_cb->dsaf_dev->dev, "set port%d serdes lp fail!",
+ mac_cb->mac_id);
+
+ return -ENOTSUPP;
+ }
+
+ ACPI_FREE(obj);
+
+ return 0;
+}
+
+struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
+{
+ struct dsaf_misc_op *misc_op;
+
+ misc_op = devm_kzalloc(dsaf_dev->dev, sizeof(*misc_op), GFP_KERNEL);
+ if (!misc_op)
+ return NULL;
+
+ if (dev_of_node(dsaf_dev->dev)) {
+ misc_op->cpld_set_led = hns_cpld_set_led;
+ misc_op->cpld_reset_led = cpld_led_reset;
+ misc_op->cpld_set_led_id = cpld_set_led_id;
+
+ misc_op->dsaf_reset = hns_dsaf_rst;
+ misc_op->xge_srst = hns_dsaf_xge_srst_by_port;
+ misc_op->xge_core_srst = hns_dsaf_xge_core_srst_by_port;
+ misc_op->ge_srst = hns_dsaf_ge_srst_by_port;
+ misc_op->ppe_srst = hns_ppe_srst_by_port;
+ misc_op->ppe_comm_srst = hns_ppe_com_srst;
+
+ misc_op->get_phy_if = hns_mac_get_phy_if;
+ misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt;
+
+ misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback;
+ } else if (is_acpi_node(dsaf_dev->dev->fwnode)) {
+ misc_op->cpld_set_led = hns_cpld_set_led;
+ misc_op->cpld_reset_led = cpld_led_reset;
+ misc_op->cpld_set_led_id = cpld_set_led_id;
+
+ misc_op->dsaf_reset = hns_dsaf_rst_acpi;
+ misc_op->xge_srst = hns_dsaf_xge_srst_by_port_acpi;
+ misc_op->xge_core_srst = hns_dsaf_xge_core_srst_by_port_acpi;
+ misc_op->ge_srst = hns_dsaf_ge_srst_by_port_acpi;
+ misc_op->ppe_srst = hns_ppe_srst_by_port_acpi;
+ misc_op->ppe_comm_srst = hns_ppe_com_srst;
+
+ misc_op->get_phy_if = hns_mac_get_phy_if_acpi;
+ misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt;
+
+ misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback_acpi;
+ } else {
+ devm_kfree(dsaf_dev->dev, misc_op);
+ misc_op = NULL;
+ }
+
+ return misc_op;
+}
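
From here on, callers never branch on the firmware type again: they fetch the ops table once (hns_dsaf_get_cfg() stores it in dsaf_dev->misc_op) and dispatch through it, as the hns_dsaf_init_hw() hunk earlier already shows:

	dsaf_dev->misc_op = hns_misc_op_get(dsaf_dev);
	if (!dsaf_dev->misc_op)
		return -ENOMEM;

	dsaf_dev->misc_op->dsaf_reset(dsaf_dev, 0);	/* assert reset    */
	mdelay(10);
	dsaf_dev->misc_op->dsaf_reset(dsaf_dev, 1);	/* de-assert reset */
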
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h
index 419f07aa9734..f06bb03d47a6 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h
@@ -33,11 +33,6 @@
#define DSAF_LED_DATA_B 4
#define DSAF_LED_ANCHOR_B 5
-void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
- u16 speed, int data);
-void cpld_led_reset(struct hns_mac_cb *mac_cb);
-int cpld_set_led_id(struct hns_mac_cb *mac_cb,
- enum hnae_led_state status);
-int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt);
+struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index 5b7ae5ff43e8..6ea872287307 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -61,22 +61,10 @@ void hns_ppe_set_indir_table(struct hns_ppe_cb *ppe_cb,
}
}
-static void __iomem *hns_ppe_common_get_ioaddr(
- struct ppe_common_cb *ppe_common)
+static void __iomem *
+hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common)
{
- void __iomem *base_addr;
-
- int idx = ppe_common->comm_index;
-
- if (idx == HNS_DSAF_COMM_SERVICE_NW_IDX)
- base_addr = ppe_common->dsaf_dev->ppe_base
- + PPE_COMMON_REG_OFFSET;
- else
- base_addr = ppe_common->dsaf_dev->sds_base
- + (idx - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET
- + PPE_COMMON_REG_OFFSET;
-
- return base_addr;
+ return ppe_common->dsaf_dev->ppe_base + PPE_COMMON_REG_OFFSET;
}
/**
@@ -90,7 +78,7 @@ int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index)
struct ppe_common_cb *ppe_common;
int ppe_num;
- if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev))
ppe_num = HNS_PPE_SERVICE_NW_ENGINE_NUM;
else
ppe_num = HNS_PPE_DEBUG_NW_ENGINE_NUM;
@@ -103,7 +91,7 @@ int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index)
ppe_common->ppe_num = ppe_num;
ppe_common->dsaf_dev = dsaf_dev;
ppe_common->comm_index = comm_index;
- if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev))
ppe_common->ppe_mode = PPE_COMMON_MODE_SERVICE;
else
ppe_common->ppe_mode = PPE_COMMON_MODE_DEBUG;
@@ -124,32 +112,7 @@ void hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index)
static void __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common,
int ppe_idx)
{
- void __iomem *base_addr;
- int common_idx = ppe_common->comm_index;
-
- if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE) {
- base_addr = ppe_common->dsaf_dev->ppe_base +
- ppe_idx * PPE_REG_OFFSET;
-
- } else {
- base_addr = ppe_common->dsaf_dev->sds_base +
- (common_idx - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET;
- }
-
- return base_addr;
-}
-
-static int hns_ppe_get_port(struct ppe_common_cb *ppe_common, int idx)
-{
- int port;
-
- if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE)
- port = idx;
- else
- port = HNS_PPE_SERVICE_NW_ENGINE_NUM
- + ppe_common->comm_index - 1;
-
- return port;
+ return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET;
}
static void hns_ppe_get_cfg(struct ppe_common_cb *ppe_common)
@@ -164,7 +127,6 @@ static void hns_ppe_get_cfg(struct ppe_common_cb *ppe_common)
ppe_cb->next = NULL;
ppe_cb->ppe_common_cb = ppe_common;
ppe_cb->index = i;
- ppe_cb->port = hns_ppe_get_port(ppe_common, i);
ppe_cb->io_base = hns_ppe_get_iobase(ppe_common, i);
ppe_cb->virq = 0;
}
@@ -237,11 +199,12 @@ static void hns_ppe_set_port_mode(struct hns_ppe_cb *ppe_cb,
static int hns_ppe_common_init_hw(struct ppe_common_cb *ppe_common)
{
enum ppe_qid_mode qid_mode;
- enum dsaf_mode dsaf_mode = ppe_common->dsaf_dev->dsaf_mode;
+ struct dsaf_device *dsaf_dev = ppe_common->dsaf_dev;
+ enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode;
- hns_ppe_com_srst(ppe_common, 0);
+ dsaf_dev->misc_op->ppe_comm_srst(dsaf_dev, 0);
mdelay(100);
- hns_ppe_com_srst(ppe_common, 1);
+ dsaf_dev->misc_op->ppe_comm_srst(dsaf_dev, 1);
mdelay(100);
if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE) {
@@ -318,24 +281,26 @@ static void hns_ppe_exc_irq_en(struct hns_ppe_cb *ppe_cb, int en)
static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb)
{
struct ppe_common_cb *ppe_common_cb = ppe_cb->ppe_common_cb;
- u32 port = ppe_cb->port;
+ u32 port = ppe_cb->index;
struct dsaf_device *dsaf_dev = ppe_common_cb->dsaf_dev;
int i;
/* get default RSS key */
netdev_rss_key_fill(ppe_cb->rss_key, HNS_PPEV2_RSS_KEY_SIZE);
- hns_ppe_srst_by_port(dsaf_dev, port, 0);
+ dsaf_dev->misc_op->ppe_srst(dsaf_dev, port, 0);
mdelay(10);
- hns_ppe_srst_by_port(dsaf_dev, port, 1);
+ dsaf_dev->misc_op->ppe_srst(dsaf_dev, port, 1);
/* clr and msk except irq*/
hns_ppe_exc_irq_en(ppe_cb, 0);
- if (ppe_common_cb->ppe_mode == PPE_COMMON_MODE_DEBUG)
+ if (ppe_common_cb->ppe_mode == PPE_COMMON_MODE_DEBUG) {
hns_ppe_set_port_mode(ppe_cb, PPE_MODE_GE);
- else
+ dsaf_write_dev(ppe_cb, PPE_CFG_PAUSE_IDLE_CNT_REG, 0);
+ } else {
hns_ppe_set_port_mode(ppe_cb, PPE_MODE_XGE);
+ }
hns_ppe_checksum_hw(ppe_cb, 0xffffffff);
hns_ppe_cnt_clr_ce(ppe_cb);
@@ -365,8 +330,10 @@ static void hns_ppe_uninit_hw(struct hns_ppe_cb *ppe_cb)
u32 port;
if (ppe_cb->ppe_common_cb) {
+ struct dsaf_device *dsaf_dev = ppe_cb->ppe_common_cb->dsaf_dev;
+
port = ppe_cb->index;
- hns_ppe_srst_by_port(ppe_cb->ppe_common_cb->dsaf_dev, port, 0);
+ dsaf_dev->misc_op->ppe_srst(dsaf_dev, port, 0);
}
}
@@ -375,7 +342,8 @@ void hns_ppe_uninit_ex(struct ppe_common_cb *ppe_common)
u32 i;
for (i = 0; i < ppe_common->ppe_num; i++) {
- hns_ppe_uninit_hw(&ppe_common->ppe_cb[i]);
+ if (ppe_common->dsaf_dev->mac_cb[i])
+ hns_ppe_uninit_hw(&ppe_common->ppe_cb[i]);
memset(&ppe_common->ppe_cb[i], 0, sizeof(struct hns_ppe_cb));
}
}
@@ -408,8 +376,11 @@ void hns_ppe_reset_common(struct dsaf_device *dsaf_dev, u8 ppe_common_index)
if (ret)
return;
- for (i = 0; i < ppe_common->ppe_num; i++)
- hns_ppe_init_hw(&ppe_common->ppe_cb[i]);
+ for (i = 0; i < ppe_common->ppe_num; i++) {
+ /* only initialize the ppe when its port exists */
+ if (dsaf_dev->mac_cb[i])
+ hns_ppe_init_hw(&ppe_common->ppe_cb[i]);
+ }
ret = hns_rcb_common_init_hw(dsaf_dev->rcb_common[ppe_common_index]);
if (ret)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
index e9c0ec2fa0dd..9d8e643e8aa6 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
@@ -80,7 +80,6 @@ struct hns_ppe_cb {
struct hns_ppe_hw_stats hw_stats;
u8 index; /* index in a ppe common device */
- u8 port; /* port id in dsaf */
void __iomem *io_base;
int virq;
u32 rss_indir_table[HNS_PPEV2_RSS_IND_TBL_SIZE]; /*shadow indir tab */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 28ee26e5c478..ef1107777c08 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -270,7 +270,7 @@ static void hns_rcb_set_port_timeout(
static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common)
{
- if (rcb_common->comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
+ if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
return HNS_RCB_SERVICE_NW_ENGINE_NUM;
else
return HNS_RCB_DEBUG_NW_ENGINE_NUM;
@@ -430,36 +430,20 @@ static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb)
static int hns_rcb_get_port_in_comm(
struct rcb_common_cb *rcb_common, int ring_idx)
{
- int comm_index = rcb_common->comm_index;
- int port;
- int q_num;
- if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
- q_num = (int)rcb_common->max_q_per_vf * rcb_common->max_vfn;
- port = ring_idx / q_num;
- } else {
- port = 0; /* config debug-ports port_id_in_comm to 0*/
- }
-
- return port;
+ return ring_idx / (rcb_common->max_q_per_vf * rcb_common->max_vfn);
}
#define SERVICE_RING_IRQ_IDX(v1) \
((v1) ? HNS_SERVICE_RING_IRQ_IDX : HNSV2_SERVICE_RING_IRQ_IDX)
-#define DEBUG_RING_IRQ_IDX(v1) \
- ((v1) ? HNS_DEBUG_RING_IRQ_IDX : HNSV2_DEBUG_RING_IRQ_IDX)
-#define DEBUG_RING_IRQ_OFFSET(v1) \
- ((v1) ? HNS_DEBUG_RING_IRQ_OFFSET : HNSV2_DEBUG_RING_IRQ_OFFSET)
static int hns_rcb_get_base_irq_idx(struct rcb_common_cb *rcb_common)
{
- int comm_index = rcb_common->comm_index;
bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver);
- if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
+ if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
return SERVICE_RING_IRQ_IDX(is_ver1);
else
- return DEBUG_RING_IRQ_IDX(is_ver1) +
- (comm_index - 1) * DEBUG_RING_IRQ_OFFSET(is_ver1);
+ return HNS_DEBUG_RING_IRQ_IDX;
}
#define RCB_COMM_BASE_TO_RING_BASE(base, ringid)\
@@ -474,7 +458,6 @@ void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
u32 i;
u32 ring_num = rcb_common->ring_num;
int base_irq_idx = hns_rcb_get_base_irq_idx(rcb_common);
- struct device_node *np = rcb_common->dsaf_dev->dev->of_node;
struct platform_device *pdev =
to_platform_device(rcb_common->dsaf_dev->dev);
bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver);
@@ -489,10 +472,10 @@ void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
ring_pair_cb->port_id_in_comm =
hns_rcb_get_port_in_comm(rcb_common, i);
ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] =
- is_ver1 ? irq_of_parse_and_map(np, base_irq_idx + i * 2) :
+ is_ver1 ? platform_get_irq(pdev, base_irq_idx + i * 2) :
platform_get_irq(pdev, base_irq_idx + i * 3 + 1);
ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX] =
- is_ver1 ? irq_of_parse_and_map(np, base_irq_idx + i * 2 + 1) :
+ is_ver1 ? platform_get_irq(pdev, base_irq_idx + i * 2 + 1) :
platform_get_irq(pdev, base_irq_idx + i * 3);
ring_pair_cb->q.phy_base =
RCB_COMM_BASE_TO_RING_BASE(rcb_common->phy_base, i);
@@ -549,7 +532,7 @@ int hns_rcb_set_coalesce_usecs(
return 0;
if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
- if (rcb_common->comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
+ if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev)) {
dev_err(rcb_common->dsaf_dev->dev,
"error: not support coalesce_usecs setting!\n");
return -EINVAL;
@@ -557,7 +540,7 @@ int hns_rcb_set_coalesce_usecs(
}
if (timeout > HNS_RCB_MAX_COALESCED_USECS) {
dev_err(rcb_common->dsaf_dev->dev,
- "error: not support coalesce %dus!\n", timeout);
+ "error: coalesce_usecs setting supports 0~1023us\n");
return -EINVAL;
}
hns_rcb_set_port_timeout(rcb_common, port_idx, timeout);
@@ -601,113 +584,82 @@ int hns_rcb_set_coalesced_frames(
*@max_vfn : max vfn number
*@max_q_per_vf:max ring number per vm
*/
-void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, int comm_index,
- u16 *max_vfn, u16 *max_q_per_vf)
+void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, u16 *max_vfn,
+ u16 *max_q_per_vf)
{
- if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
- switch (dsaf_mode) {
- case DSAF_MODE_DISABLE_6PORT_0VM:
- *max_vfn = 1;
- *max_q_per_vf = 16;
- break;
- case DSAF_MODE_DISABLE_FIX:
- *max_vfn = 1;
- *max_q_per_vf = 1;
- break;
- case DSAF_MODE_DISABLE_2PORT_64VM:
- *max_vfn = 64;
- *max_q_per_vf = 1;
- break;
- case DSAF_MODE_DISABLE_6PORT_16VM:
- *max_vfn = 16;
- *max_q_per_vf = 1;
- break;
- default:
- *max_vfn = 1;
- *max_q_per_vf = 16;
- break;
- }
- } else {
+ switch (dsaf_mode) {
+ case DSAF_MODE_DISABLE_6PORT_0VM:
+ *max_vfn = 1;
+ *max_q_per_vf = 16;
+ break;
+ case DSAF_MODE_DISABLE_FIX:
+ case DSAF_MODE_DISABLE_SP:
*max_vfn = 1;
*max_q_per_vf = 1;
+ break;
+ case DSAF_MODE_DISABLE_2PORT_64VM:
+ *max_vfn = 64;
+ *max_q_per_vf = 1;
+ break;
+ case DSAF_MODE_DISABLE_6PORT_16VM:
+ *max_vfn = 16;
+ *max_q_per_vf = 1;
+ break;
+ default:
+ *max_vfn = 1;
+ *max_q_per_vf = 16;
+ break;
}
}
-int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev, int comm_index)
+int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
{
- if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
- switch (dsaf_dev->dsaf_mode) {
- case DSAF_MODE_ENABLE_FIX:
- return 1;
-
- case DSAF_MODE_DISABLE_FIX:
- return 6;
-
- case DSAF_MODE_ENABLE_0VM:
- return 32;
-
- case DSAF_MODE_DISABLE_6PORT_0VM:
- case DSAF_MODE_ENABLE_16VM:
- case DSAF_MODE_DISABLE_6PORT_2VM:
- case DSAF_MODE_DISABLE_6PORT_16VM:
- case DSAF_MODE_DISABLE_6PORT_4VM:
- case DSAF_MODE_ENABLE_8VM:
- return 96;
-
- case DSAF_MODE_DISABLE_2PORT_16VM:
- case DSAF_MODE_DISABLE_2PORT_8VM:
- case DSAF_MODE_ENABLE_32VM:
- case DSAF_MODE_DISABLE_2PORT_64VM:
- case DSAF_MODE_ENABLE_128VM:
- return 128;
-
- default:
- dev_warn(dsaf_dev->dev,
- "get ring num fail,use default!dsaf_mode=%d\n",
- dsaf_dev->dsaf_mode);
- return 128;
- }
- } else {
+ switch (dsaf_dev->dsaf_mode) {
+ case DSAF_MODE_ENABLE_FIX:
+ case DSAF_MODE_DISABLE_SP:
return 1;
+
+ case DSAF_MODE_DISABLE_FIX:
+ return 6;
+
+ case DSAF_MODE_ENABLE_0VM:
+ return 32;
+
+ case DSAF_MODE_DISABLE_6PORT_0VM:
+ case DSAF_MODE_ENABLE_16VM:
+ case DSAF_MODE_DISABLE_6PORT_2VM:
+ case DSAF_MODE_DISABLE_6PORT_16VM:
+ case DSAF_MODE_DISABLE_6PORT_4VM:
+ case DSAF_MODE_ENABLE_8VM:
+ return 96;
+
+ case DSAF_MODE_DISABLE_2PORT_16VM:
+ case DSAF_MODE_DISABLE_2PORT_8VM:
+ case DSAF_MODE_ENABLE_32VM:
+ case DSAF_MODE_DISABLE_2PORT_64VM:
+ case DSAF_MODE_ENABLE_128VM:
+ return 128;
+
+ default:
+ dev_warn(dsaf_dev->dev,
+ "get ring num fail,use default!dsaf_mode=%d\n",
+ dsaf_dev->dsaf_mode);
+ return 128;
}
}
-void __iomem *hns_rcb_common_get_vaddr(struct dsaf_device *dsaf_dev,
- int comm_index)
+void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
{
- void __iomem *base_addr;
-
- if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX)
- base_addr = dsaf_dev->ppe_base + RCB_COMMON_REG_OFFSET;
- else
- base_addr = dsaf_dev->sds_base
- + (comm_index - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET
- + RCB_COMMON_REG_OFFSET;
+ struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;
- return base_addr;
+ return dsaf_dev->ppe_base + RCB_COMMON_REG_OFFSET;
}
-static phys_addr_t hns_rcb_common_get_paddr(struct dsaf_device *dsaf_dev,
- int comm_index)
+static phys_addr_t hns_rcb_common_get_paddr(struct rcb_common_cb *rcb_common)
{
- struct device_node *np = dsaf_dev->dev->of_node;
- phys_addr_t phy_addr;
- const __be32 *tmp_addr;
- u64 addr_offset = 0;
- u64 size = 0;
- int index = 0;
-
- if (comm_index == HNS_DSAF_COMM_SERVICE_NW_IDX) {
- index = 2;
- addr_offset = RCB_COMMON_REG_OFFSET;
- } else {
- index = 1;
- addr_offset = (comm_index - 1) * HNS_DSAF_DEBUG_NW_REG_OFFSET +
- RCB_COMMON_REG_OFFSET;
- }
- tmp_addr = of_get_address(np, index, &size, NULL);
- phy_addr = of_translate_address(np, tmp_addr);
- return phy_addr + addr_offset;
+ struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;
+
+ return dsaf_dev->ppe_paddr + RCB_COMMON_REG_OFFSET;
}
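With the debug RCB windows gone, the physical base no longer has to be re-derived from the DT "reg" property on every call; ppe_paddr is assumed to be cached once at probe time, e.g. (the resource index is an assumption):

    struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

    if (res)
            dsaf_dev->ppe_paddr = res->start;  /* hypothetical: PPE window at index 0 */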
int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev,
@@ -717,7 +669,7 @@ int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev,
enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode;
u16 max_vfn;
u16 max_q_per_vf;
- int ring_num = hns_rcb_get_ring_num(dsaf_dev, comm_index);
+ int ring_num = hns_rcb_get_ring_num(dsaf_dev);
rcb_common =
devm_kzalloc(dsaf_dev->dev, sizeof(*rcb_common) +
@@ -732,12 +684,12 @@ int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev,
rcb_common->desc_num = dsaf_dev->desc_num;
- hns_rcb_get_queue_mode(dsaf_mode, comm_index, &max_vfn, &max_q_per_vf);
+ hns_rcb_get_queue_mode(dsaf_mode, &max_vfn, &max_q_per_vf);
rcb_common->max_vfn = max_vfn;
rcb_common->max_q_per_vf = max_q_per_vf;
- rcb_common->io_base = hns_rcb_common_get_vaddr(dsaf_dev, comm_index);
- rcb_common->phy_base = hns_rcb_common_get_paddr(dsaf_dev, comm_index);
+ rcb_common->io_base = hns_rcb_common_get_vaddr(rcb_common);
+ rcb_common->phy_base = hns_rcb_common_get_paddr(rcb_common);
dsaf_dev->rcb_common[comm_index] = rcb_common;
return 0;
@@ -932,7 +884,7 @@ void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
{
u32 *regs = data;
bool is_ver1 = AE_IS_VER1(rcb_com->dsaf_dev->dsaf_ver);
- bool is_dbg = (rcb_com->comm_index != HNS_DSAF_COMM_SERVICE_NW_IDX);
+ bool is_dbg = HNS_DSAF_IS_DEBUG(rcb_com->dsaf_dev);
u32 reg_tmp;
u32 reg_num_tmp;
u32 i = 0;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
index eb61014ad615..99b4e1ba0a94 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
@@ -40,7 +40,7 @@ struct rcb_common_cb;
#define HNS_RCB_DEF_COALESCED_FRAMES 50
#define HNS_RCB_CLK_FREQ_MHZ 350
#define HNS_RCB_MAX_COALESCED_USECS 0x3ff
-#define HNS_RCB_DEF_COALESCED_USECS 3
+#define HNS_RCB_DEF_COALESCED_USECS 50
#define HNS_RCB_COMMON_ENDIAN 1
@@ -111,7 +111,7 @@ void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index);
int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common);
void hns_rcb_start(struct hnae_queue *q, u32 val);
void hns_rcb_get_cfg(struct rcb_common_cb *rcb_common);
-void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, int comm_index,
+void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode,
u16 *max_vfn, u16 *max_q_per_vf);
void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index 7d7204f45e78..235f74444b1d 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -10,25 +10,20 @@
#ifndef _DSAF_REG_H_
#define _DSAF_REG_H_
-#define HNS_DEBUG_RING_IRQ_IDX 55
-#define HNS_SERVICE_RING_IRQ_IDX 59
-#define HNS_DEBUG_RING_IRQ_OFFSET 2
-#define HNSV2_DEBUG_RING_IRQ_IDX 409
-#define HNSV2_SERVICE_RING_IRQ_IDX 25
-#define HNSV2_DEBUG_RING_IRQ_OFFSET 9
-
-#define DSAF_MAX_PORT_NUM_PER_CHIP 8
-#define DSAF_SERVICE_PORT_NUM_PER_DSAF 6
-#define DSAF_MAX_VM_NUM 128
-
-#define DSAF_COMM_DEV_NUM 3
-#define DSAF_PPE_INODE_BASE 6
-#define HNS_DSAF_COMM_SERVICE_NW_IDX 0
+#include <linux/regmap.h>
+#define HNS_DEBUG_RING_IRQ_IDX 0
+#define HNS_SERVICE_RING_IRQ_IDX 59
+#define HNSV2_SERVICE_RING_IRQ_IDX 25
+
+#define DSAF_MAX_PORT_NUM 6
+#define DSAF_MAX_VM_NUM 128
+
+#define DSAF_COMM_DEV_NUM 1
+#define DSAF_PPE_INODE_BASE 6
#define DSAF_DEBUG_NW_NUM 2
#define DSAF_SERVICE_NW_NUM 6
#define DSAF_COMM_CHN DSAF_SERVICE_NW_NUM
#define DSAF_GE_NUM ((DSAF_SERVICE_NW_NUM) + (DSAF_DEBUG_NW_NUM))
-#define DSAF_PORT_NUM ((DSAF_SERVICE_NW_NUM) + (DSAF_DEBUG_NW_NUM))
#define DSAF_XGE_NUM DSAF_SERVICE_NW_NUM
#define DSAF_PORT_TYPE_NUM 3
#define DSAF_NODE_NUM 18
@@ -37,7 +32,7 @@
#define DSAFV2_SBM_NUM 8
#define DSAFV2_SBM_XGE_CHN 6
#define DSAFV2_SBM_PPE_CHN 1
-#define DASFV2_ROCEE_CRD_NUM 8
+#define DASFV2_ROCEE_CRD_NUM 1
#define DSAF_VOQ_NUM DSAF_NODE_NUM
#define DSAF_INODE_NUM DSAF_NODE_NUM
@@ -137,6 +132,7 @@
#define DSAF_PPE_INT_STS_0_REG 0x1E0
#define DSAF_ROCEE_INT_STS_0_REG 0x200
#define DSAFV2_SERDES_LBK_0_REG 0x220
+#define DSAF_PAUSE_CFG_REG 0x240
#define DSAF_PPE_QID_CFG_0_REG 0x300
#define DSAF_SW_PORT_TYPE_0_REG 0x320
#define DSAF_STP_PORT_TYPE_0_REG 0x340
@@ -155,6 +151,7 @@
#define DSAF_INODE_FINAL_IN_PKT_NUM_0_REG 0x1030
#define DSAF_INODE_SBM_PID_NUM_0_REG 0x1038
#define DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG 0x103C
+#define DSAFV2_INODE_FINAL_IN_PAUSE_NUM_0_REG 0x1024
#define DSAF_INODE_SBM_RELS_NUM_0_REG 0x104C
#define DSAF_INODE_SBM_DROP_NUM_0_REG 0x1050
#define DSAF_INODE_CRC_FALSE_NUM_0_REG 0x1054
@@ -169,6 +166,9 @@
#define DSAF_INODE_GE_FC_EN_0_REG 0x1B00
#define DSAF_INODE_VC0_IN_PKT_NUM_0_REG 0x1B50
#define DSAF_INODE_VC1_IN_PKT_NUM_0_REG 0x1C00
+#define DSAF_INODE_IN_PRIO_PAUSE_BASE_REG 0x1C00
+#define DSAF_INODE_IN_PRIO_PAUSE_BASE_OFFSET 0x100
+#define DSAF_INODE_IN_PRIO_PAUSE_OFFSET 0x50
#define DSAF_SBM_CFG_REG_0_REG 0x2000
#define DSAF_SBM_BP_CFG_0_XGE_REG_0_REG 0x2004
@@ -178,7 +178,7 @@
#define DSAF_SBM_BP_CFG_2_XGE_REG_0_REG 0x200C
#define DSAF_SBM_BP_CFG_2_PPE_REG_0_REG 0x230C
#define DSAF_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x260C
-#define DSAFV2_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x238C
+#define DSAFV2_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x238C
#define DSAF_SBM_FREE_CNT_0_0_REG 0x2010
#define DSAF_SBM_FREE_CNT_1_0_REG 0x2014
#define DSAF_SBM_BP_CNT_0_0_REG 0x2018
@@ -235,6 +235,8 @@
#define DSAF_XOD_ROCEE_RCVIN0_CNT_0_REG 0x3074
#define DSAF_XOD_ROCEE_RCVIN1_CNT_0_REG 0x3078
#define DSAF_XOD_FIFO_STATUS_0_REG 0x307C
+#define DSAF_XOD_XGE_PFC_PRIO_CNT_BASE_REG 0x3A00
+#define DSAF_XOD_XGE_PFC_PRIO_CNT_OFFSET 0x4
#define DSAF_VOQ_ECC_INVERT_EN_0_REG 0x4004
#define DSAF_VOQ_SRAM_PKT_NUM_0_REG 0x4008
@@ -711,6 +713,10 @@
#define DSAF_PFC_UNINT_CNT_M ((1ULL << 9) - 1)
#define DSAF_PFC_UNINT_CNT_S 0
+#define DSAF_MAC_PAUSE_RX_EN_B 2
+#define DSAF_PFC_PAUSE_RX_EN_B 1
+#define DSAF_PFC_PAUSE_TX_EN_B 0
+
#define DSAF_PPE_QID_CFG_M 0xFF
#define DSAF_PPE_QID_CFG_S 0
@@ -790,6 +796,18 @@
#define DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S 9
#define DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_M (((1ULL << 9) - 1) << 9)
+#define DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_S 0
+#define DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_M (((1ULL << 8) - 1) << 0)
+#define DSAFV2_SBM_CFG2_ROCEE_RESET_BUF_NUM_S 8
+#define DSAFV2_SBM_CFG2_ROCEE_RESET_BUF_NUM_M (((1ULL << 8) - 1) << 8)
+
+#define DSAFV2_SBM_CFG2_PPE_SET_BUF_NUM_S (0)
+#define DSAFV2_SBM_CFG2_PPE_SET_BUF_NUM_M (((1ULL << 6) - 1) << 0)
+#define DSAFV2_SBM_CFG2_PPE_RESET_BUF_NUM_S (6)
+#define DSAFV2_SBM_CFG2_PPE_RESET_BUF_NUM_M (((1ULL << 6) - 1) << 6)
+#define DSAFV2_SBM_CFG2_PPE_CFG_USEFUL_NUM_S (12)
+#define DSAFV2_SBM_CFG2_PPE_CFG_USEFUL_NUM_M (((1ULL << 6) - 1) << 12)
+
#define DSAF_TBL_TCAM_ADDR_S 0
#define DSAF_TBL_TCAM_ADDR_M ((1ULL << 9) - 1)
@@ -988,6 +1006,19 @@ static inline u32 dsaf_read_reg(u8 __iomem *base, u32 reg)
return readl(reg_addr + reg);
}
+static inline void dsaf_write_syscon(struct regmap *base, u32 reg, u32 value)
+{
+ regmap_write(base, reg, value);
+}
+
+static inline u32 dsaf_read_syscon(struct regmap *base, u32 reg)
+{
+ unsigned int val;
+
+ regmap_read(base, reg, &val);
+ return val;
+}
+
#define dsaf_read_dev(a, reg) \
dsaf_read_reg((a)->io_base, (reg))
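One caveat on the new syscon helpers: regmap_read() returns an error code that dsaf_read_syscon() discards, so a failed bus access hands back an uninitialized value. A hedged, stricter variant:

    static inline u32 dsaf_read_syscon_safe(struct regmap *base, u32 reg)
    {
            unsigned int val;

            if (regmap_read(base, reg, &val))
                    return 0;       /* treat bus errors as all-zeroes */
            return val;
    }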
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index fd90f3737963..8f4f0e8da984 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -119,7 +119,7 @@ static void hns_xgmac_enable(void *mac_drv, enum mac_commom_mode mode)
= (struct dsaf_device *)dev_get_drvdata(drv->dev);
u32 port = drv->mac_id;
- hns_dsaf_xge_core_srst_by_port(dsaf_dev, port, 1);
+ dsaf_dev->misc_op->xge_core_srst(dsaf_dev, port, 1);
mdelay(10);
/*enable XGE rX/tX */
@@ -157,7 +157,7 @@ static void hns_xgmac_disable(void *mac_drv, enum mac_commom_mode mode)
}
mdelay(10);
- hns_dsaf_xge_core_srst_by_port(dsaf_dev, port, 0);
+ dsaf_dev->misc_op->xge_core_srst(dsaf_dev, port, 0);
}
/**
@@ -198,9 +198,9 @@ static void hns_xgmac_init(void *mac_drv)
= (struct dsaf_device *)dev_get_drvdata(drv->dev);
u32 port = drv->mac_id;
- hns_dsaf_xge_srst_by_port(dsaf_dev, port, 0);
+ dsaf_dev->misc_op->xge_srst(dsaf_dev, port, 0);
mdelay(100);
- hns_dsaf_xge_srst_by_port(dsaf_dev, port, 1);
+ dsaf_dev->misc_op->xge_srst(dsaf_dev, port, 1);
mdelay(100);
hns_xgmac_exc_irq_en(drv, 0);
@@ -425,7 +425,7 @@ static void hns_xgmac_free(void *mac_drv)
u32 mac_id = drv->mac_id;
- hns_dsaf_xge_srst_by_port(dsaf_dev, mac_id, 0);
+ dsaf_dev->misc_op->xge_srst(dsaf_dev, mac_id, 0);
}
/**
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 687204b780b0..d7e1f8c7ae92 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -132,6 +132,13 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
ring_ptr_move_fw(ring, next_to_use);
}
+static const struct acpi_device_id hns_enet_acpi_match[] = {
+ { "HISI00C1", 0 },
+ { "HISI00C2", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, hns_enet_acpi_match);
+
static void fill_desc(struct hnae_ring *ring, void *priv,
int size, dma_addr_t dma, int frag_end,
int buf_num, enum hns_desc_type type, int mtu)
@@ -593,6 +600,7 @@ static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
ring->stats.sw_err_cnt++;
return -ENOMEM;
}
+ skb_reset_mac_header(skb);
prefetchw(skb->data);
length = le16_to_cpu(desc->rx.pkt_len);
@@ -754,16 +762,16 @@ static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data,
recv_pkts = 0, recv_bds = 0, clean_count = 0;
recv:
while (recv_pkts < budget && recv_bds < num) {
- /* reuse or realloc buffers*/
+ /* reuse or realloc buffers */
if (clean_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
hns_nic_alloc_rx_buffers(ring_data, clean_count);
clean_count = 0;
}
- /* poll one pkg*/
+ /* poll one pkt */
err = hns_nic_poll_rx_skb(ring_data, &skb, &bnum);
if (unlikely(!skb)) /* this fault cannot be repaired */
- break;
+ goto out;
recv_bds += bnum;
clean_count += bnum;
@@ -789,6 +797,7 @@ recv:
}
}
+out:
/* make sure all data has been written before submit */
if (clean_count > 0)
hns_nic_alloc_rx_buffers(ring_data, clean_count);
@@ -983,8 +992,26 @@ static void hns_nic_adjust_link(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_handle *h = priv->ae_handle;
+ int state = 1;
+
+ if (priv->phy) {
+ h->dev->ops->adjust_link(h, ndev->phydev->speed,
+ ndev->phydev->duplex);
+ state = priv->phy->link;
+ }
+ state = state && h->dev->ops->get_status(h);
- h->dev->ops->adjust_link(h, ndev->phydev->speed, ndev->phydev->duplex);
+ if (state != priv->link) {
+ if (state) {
+ netif_carrier_on(ndev);
+ netif_tx_wake_all_queues(ndev);
+ netdev_info(ndev, "link up\n");
+ } else {
+ netif_carrier_off(ndev);
+ netdev_info(ndev, "link down\n");
+ }
+ priv->link = state;
+ }
}
/**
@@ -996,19 +1023,22 @@ static void hns_nic_adjust_link(struct net_device *ndev)
int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
- struct phy_device *phy_dev = NULL;
+ struct phy_device *phy_dev = h->phy_dev;
+ int ret;
- if (!h->phy_node)
+ if (!h->phy_dev)
return 0;
- if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
- phy_dev = of_phy_connect(ndev, h->phy_node,
- hns_nic_adjust_link, 0, h->phy_if);
- else
- phy_dev = of_phy_attach(ndev, h->phy_node, 0, h->phy_if);
+ if (h->phy_if != PHY_INTERFACE_MODE_XGMII) {
+ phy_dev->dev_flags = 0;
- if (unlikely(!phy_dev) || IS_ERR(phy_dev))
- return !phy_dev ? -ENODEV : PTR_ERR(phy_dev);
+ ret = phy_connect_direct(ndev, phy_dev, hns_nic_adjust_link,
+ h->phy_if);
+ } else {
+ ret = phy_attach_direct(ndev, phy_dev, 0, h->phy_if);
+ }
+ if (unlikely(ret))
+ return -ENODEV;
phy_dev->supported &= h->if_support;
phy_dev->advertising = phy_dev->supported;
@@ -1067,13 +1097,8 @@ void hns_nic_update_stats(struct net_device *netdev)
static void hns_init_mac_addr(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
- struct device_node *node = priv->dev->of_node;
- const void *mac_addr_temp;
- mac_addr_temp = of_get_mac_address(node);
- if (mac_addr_temp && is_valid_ether_addr(mac_addr_temp)) {
- memcpy(ndev->dev_addr, mac_addr_temp, ndev->addr_len);
- } else {
+ if (!device_get_mac_address(priv->dev, ndev->dev_addr, ETH_ALEN)) {
eth_hw_addr_random(ndev);
dev_warn(priv->dev, "No valid mac, use random mac %pM",
ndev->dev_addr);
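device_get_mac_address() is the firmware-agnostic replacement for of_get_mac_address(): it walks the standard mac-address/local-mac-address properties through the unified device-property API, so the same path serves DT and ACPI _DSD. In miniature:

    u8 mac[ETH_ALEN];

    if (!device_get_mac_address(dev, mac, ETH_ALEN))
            eth_random_addr(mac);   /* fall back to a random address */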
@@ -1176,7 +1201,7 @@ static int hns_nic_net_up(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
struct hnae_handle *h = priv->ae_handle;
- int i, j, k;
+ int i, j;
int ret;
ret = hns_nic_init_irq(priv);
@@ -1191,9 +1216,6 @@ static int hns_nic_net_up(struct net_device *ndev)
goto out_has_some_queues;
}
- for (k = 0; k < h->q_num; k++)
- h->dev->ops->toggle_queue_status(h->qs[k], 1);
-
ret = h->dev->ops->set_mac_addr(h, ndev->dev_addr);
if (ret)
goto out_set_mac_addr_err;
@@ -1213,8 +1235,6 @@ static int hns_nic_net_up(struct net_device *ndev)
out_start_err:
netif_stop_queue(ndev);
out_set_mac_addr_err:
- for (k = 0; k < h->q_num; k++)
- h->dev->ops->toggle_queue_status(h->qs[k], 0);
out_has_some_queues:
for (j = i - 1; j >= 0; j--)
hns_nic_ring_close(ndev, j);
@@ -1275,7 +1295,7 @@ void hns_nic_net_reinit(struct net_device *netdev)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
- priv->netdev->trans_start = jiffies;
+ netif_trans_update(priv->netdev);
while (test_and_set_bit(NIC_STATE_REINITING, &priv->state))
usleep_range(1000, 2000);
@@ -1376,7 +1396,7 @@ static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
ret = hns_nic_net_xmit_hw(ndev, skb,
&tx_ring_data(priv, skb->queue_mapping));
if (ret == NETDEV_TX_OK) {
- ndev->trans_start = jiffies;
+ netif_trans_update(ndev);
ndev->stats.tx_bytes += skb->len;
ndev->stats.tx_packets++;
}
@@ -1421,7 +1441,6 @@ static int hns_nic_set_features(struct net_device *netdev,
netdev_features_t features)
{
struct hns_nic_priv *priv = netdev_priv(netdev);
- struct hnae_handle *h = priv->ae_handle;
switch (priv->enet_ver) {
case AE_VERSION_1:
@@ -1434,11 +1453,9 @@ static int hns_nic_set_features(struct net_device *netdev,
priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
/* The chip only support 7*4096 */
netif_set_gso_max_size(netdev, 7 * 4096);
- h->dev->ops->set_tso_stats(h, 1);
} else {
priv->ops.fill_desc = fill_v2_desc;
priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
- h->dev->ops->set_tso_stats(h, 0);
}
break;
}
@@ -1571,27 +1588,14 @@ static void hns_nic_update_link_status(struct net_device *netdev)
struct hns_nic_priv *priv = netdev_priv(netdev);
struct hnae_handle *h = priv->ae_handle;
- int state = 1;
- if (priv->phy) {
- if (!genphy_update_link(priv->phy))
- state = priv->phy->link;
- else
- state = 0;
- }
- state = state && h->dev->ops->get_status(h);
+ if (h->phy_dev) {
+ if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
+ return;
- if (state != priv->link) {
- if (state) {
- netif_carrier_on(netdev);
- netif_tx_wake_all_queues(netdev);
- netdev_info(netdev, "link up\n");
- } else {
- netif_carrier_off(netdev);
- netdev_info(netdev, "link down\n");
- }
- priv->link = state;
+ (void)genphy_read_status(h->phy_dev);
}
+ hns_nic_adjust_link(netdev);
}
/* for dumping key regs */
@@ -1627,7 +1631,7 @@ static void hns_nic_dump(struct hns_nic_priv *priv)
}
}
-/* for resetting suntask*/
+/* for resetting subtask */
static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
{
enum hnae_port_type type = priv->ae_handle->port_type;
@@ -1648,7 +1652,7 @@ static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
rtnl_lock();
/* put off any impending NetWatchDogTimeout */
- priv->netdev->trans_start = jiffies;
+ netif_trans_update(priv->netdev);
if (type == HNAE_PORT_DEBUG) {
hns_nic_net_reinit(priv->netdev);
@@ -1797,11 +1801,14 @@ static void hns_nic_set_priv_ops(struct net_device *netdev)
priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
/* This chip only support 7*4096 */
netif_set_gso_max_size(netdev, 7 * 4096);
- h->dev->ops->set_tso_stats(h, 1);
} else {
priv->ops.fill_desc = fill_v2_desc;
priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
}
+ /* enable TSO stats at init;
+ * TSO itself is toggled per packet via the TSE bit in the BD
+ */
+ h->dev->ops->set_tso_stats(h, 1);
}
}
@@ -1812,7 +1819,7 @@ static int hns_nic_try_get_ae(struct net_device *ndev)
int ret;
h = hnae_get_handle(&priv->netdev->dev,
- priv->ae_node, priv->port_id, NULL);
+ priv->fwnode, priv->port_id, NULL);
if (IS_ERR_OR_NULL(h)) {
ret = -ENODEV;
dev_dbg(priv->dev, "has not handle, register notifier!\n");
@@ -1872,7 +1879,7 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct net_device *ndev;
struct hns_nic_priv *priv;
- struct device_node *node = dev->of_node;
+ u32 port_id;
int ret;
ndev = alloc_etherdev_mq(sizeof(struct hns_nic_priv), NIC_MAX_Q_PER_VF);
@@ -1885,21 +1892,56 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
priv->dev = dev;
priv->netdev = ndev;
- if (of_device_is_compatible(node, "hisilicon,hns-nic-v1"))
- priv->enet_ver = AE_VERSION_1;
- else
- priv->enet_ver = AE_VERSION_2;
+ if (dev_of_node(dev)) {
+ struct device_node *ae_node;
- priv->ae_node = (void *)of_parse_phandle(node, "ae-handle", 0);
- if (IS_ERR_OR_NULL(priv->ae_node)) {
- ret = PTR_ERR(priv->ae_node);
- dev_err(dev, "not find ae-handle\n");
- goto out_read_prop_fail;
+ if (of_device_is_compatible(dev->of_node,
+ "hisilicon,hns-nic-v1"))
+ priv->enet_ver = AE_VERSION_1;
+ else
+ priv->enet_ver = AE_VERSION_2;
+
+ ae_node = of_parse_phandle(dev->of_node, "ae-handle", 0);
+ if (IS_ERR_OR_NULL(ae_node)) {
+ ret = PTR_ERR(ae_node);
+ dev_err(dev, "not find ae-handle\n");
+ goto out_read_prop_fail;
+ }
+ priv->fwnode = &ae_node->fwnode;
+ } else if (is_acpi_node(dev->fwnode)) {
+ struct acpi_reference_args args;
+
+ if (acpi_dev_found(hns_enet_acpi_match[0].id))
+ priv->enet_ver = AE_VERSION_1;
+ else if (acpi_dev_found(hns_enet_acpi_match[1].id))
+ priv->enet_ver = AE_VERSION_2;
+ else
+ return -ENXIO;
+
+ /* try to find port-idx-in-ae first */
+ ret = acpi_node_get_property_reference(dev->fwnode,
+ "ae-handle", 0, &args);
+ if (ret) {
+ dev_err(dev, "not find ae-handle\n");
+ goto out_read_prop_fail;
+ }
+ priv->fwnode = acpi_fwnode_handle(args.adev);
+ } else {
+ dev_err(dev, "cannot read cfg data from OF or acpi\n");
+ return -ENXIO;
}
- ret = of_property_read_u32(node, "port-id", &priv->port_id);
- if (ret)
- goto out_read_prop_fail;
+ ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id);
+ if (ret) {
+ /* only for compatibility with old DTs */
+ ret = device_property_read_u32(dev, "port-id", &port_id);
+ if (ret)
+ goto out_read_prop_fail;
+ /* for old dts, we need to calculate the port offset */
+ port_id = port_id < HNS_SRV_OFFSET ? port_id + HNS_DEBUG_OFFSET
+ : port_id - HNS_SRV_OFFSET;
+ }
+ priv->port_id = port_id;
hns_init_mac_addr(ndev);
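The port-id fallback above reads naturally as a tiny helper; hns_get_port_idx() here is hypothetical, but the device_property_read_u32() calls mirror the patch:

    static int hns_get_port_idx(struct device *dev, u32 *port)
    {
            /* new firmware exposes port-idx-in-ae directly */
            if (!device_property_read_u32(dev, "port-idx-in-ae", port))
                    return 0;
            /* legacy DTs only carry port-id, remapped by the caller */
            return device_property_read_u32(dev, "port-id", port);
    }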
@@ -1931,7 +1973,7 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
dev_dbg(dev, "set mask to 64bit\n");
else
- dev_err(dev, "set mask to 32bit fail!\n");
+ dev_err(dev, "set mask to 64bit fail!\n");
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(ndev);
@@ -2005,6 +2047,7 @@ static struct platform_driver hns_nic_dev_driver = {
.driver = {
.name = "hns-nic",
.of_match_table = hns_enet_of_match,
+ .acpi_match_table = ACPI_PTR(hns_enet_acpi_match),
},
.probe = hns_nic_dev_probe,
.remove = hns_nic_dev_remove,
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
index c68ab3d34fc2..44bb3015eed3 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
@@ -18,6 +18,9 @@
#include "hnae.h"
+#define HNS_DEBUG_OFFSET 6
+#define HNS_SRV_OFFSET 2
+
enum hns_nic_state {
NIC_STATE_TESTING = 0,
NIC_STATE_RESETTING,
@@ -51,7 +54,7 @@ struct hns_nic_ops {
};
struct hns_nic_priv {
- const struct device_node *ae_node;
+ const struct fwnode_handle *fwnode;
u32 enet_ver;
u32 port_id;
int phy_mode;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 3d746c887873..ab33487a5321 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -46,11 +46,10 @@ static u32 hns_nic_get_link(struct net_device *net_dev)
u32 link_stat = priv->link;
struct hnae_handle *h;
- assert(priv && priv->ae_handle);
h = priv->ae_handle;
if (priv->phy) {
- if (!genphy_update_link(priv->phy))
+ if (!genphy_read_status(priv->phy))
link_stat = priv->phy->link;
else
link_stat = 0;
@@ -166,13 +165,21 @@ static int hns_nic_get_settings(struct net_device *net_dev,
cmd->advertising |= ADVERTISED_10000baseKR_Full;
}
- if (h->port_type == HNAE_PORT_SERVICE) {
+ switch (h->media_type) {
+ case HNAE_MEDIA_TYPE_FIBER:
cmd->port = PORT_FIBRE;
- cmd->supported |= SUPPORTED_Pause;
- } else {
+ break;
+ case HNAE_MEDIA_TYPE_COPPER:
cmd->port = PORT_TP;
+ break;
+ case HNAE_MEDIA_TYPE_UNKNOWN:
+ default:
+ break;
}
+ if (!(AE_IS_VER1(priv->enet_ver) && h->port_type == HNAE_PORT_DEBUG))
+ cmd->supported |= SUPPORTED_Pause;
+
cmd->transceiver = XCVR_EXTERNAL;
cmd->mdio_support = (ETH_MDIO_SUPPORTS_C45 | ETH_MDIO_SUPPORTS_C22);
hns_get_mdix_mode(net_dev, cmd);
@@ -243,6 +250,7 @@ static const char hns_nic_test_strs[][ETH_GSTRING_LEN] = {
static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en)
{
#define COPPER_CONTROL_REG 0
+#define PHY_POWER_DOWN BIT(11)
#define PHY_LOOP_BACK BIT(14)
u16 val = 0;
@@ -253,33 +261,40 @@ static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en)
/* speed : 1000M */
phy_write(phy_dev, HNS_PHY_PAGE_REG, 2);
phy_write(phy_dev, 21, 0x1046);
+
+ phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
/* Force Master */
phy_write(phy_dev, 9, 0x1F00);
+
/* Soft-reset */
phy_write(phy_dev, 0, 0x9140);
/* If autoneg is disabled, two soft-reset operations are needed */
phy_write(phy_dev, 0, 0x9140);
- phy_write(phy_dev, 22, 0xFA);
+
+ phy_write(phy_dev, HNS_PHY_PAGE_REG, 0xFA);
/* Default is 0x0400 */
phy_write(phy_dev, 1, 0x418);
/* Force 1000M Link, Default is 0x0200 */
phy_write(phy_dev, 7, 0x20C);
- phy_write(phy_dev, 22, 0);
+ phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
- /* Enable MAC loop-back */
+ /* Enable PHY loop-back */
val = phy_read(phy_dev, COPPER_CONTROL_REG);
val |= PHY_LOOP_BACK;
+ val &= ~PHY_POWER_DOWN;
phy_write(phy_dev, COPPER_CONTROL_REG, val);
} else {
- phy_write(phy_dev, 22, 0xFA);
+ phy_write(phy_dev, HNS_PHY_PAGE_REG, 0xFA);
phy_write(phy_dev, 1, 0x400);
phy_write(phy_dev, 7, 0x200);
- phy_write(phy_dev, 22, 0);
+ phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);
+ phy_write(phy_dev, 9, 0xF00);
val = phy_read(phy_dev, COPPER_CONTROL_REG);
val &= ~PHY_LOOP_BACK;
+ val |= PHY_POWER_DOWN;
phy_write(phy_dev, COPPER_CONTROL_REG, val);
}
return 0;
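Most of the loopback fix is page discipline on a Marvell-style paged PHY: the raw writes to register 22 become HNS_PHY_PAGE_REG, pages are restored to 0 after use, and the power-down bit is toggled together with loopback. The recurring pattern, as a sketch:

    phy_write(phy_dev, HNS_PHY_PAGE_REG, 0xFA);  /* select vendor page 0xFA */
    phy_write(phy_dev, 1, 0x418);                /* page-local register write */
    phy_write(phy_dev, HNS_PHY_PAGE_REG, 0);     /* always restore page 0 */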
@@ -340,28 +355,16 @@ static int __lb_up(struct net_device *ndev,
hns_nic_net_reset(ndev);
- if (priv->phy) {
- phy_disconnect(priv->phy);
- msleep(100);
-
- ret = hns_nic_init_phy(ndev, h);
- if (ret)
- return ret;
- }
-
ret = __lb_setup(ndev, loop_mode);
if (ret)
return ret;
- msleep(100);
+ msleep(200);
ret = h->dev->ops->start ? h->dev->ops->start(h) : 0;
if (ret)
return ret;
- if (priv->phy)
- phy_start(priv->phy);
-
/* link adjust duplex */
if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII)
speed = 1000;
@@ -562,9 +565,6 @@ static int __lb_down(struct net_device *ndev)
__func__,
ret);
- if (priv->phy)
- phy_stop(priv->phy);
-
if (h->dev->ops->stop)
h->dev->ops->stop(h);
@@ -597,7 +597,7 @@ static void hns_nic_self_test(struct net_device *ndev,
st_param[1][0] = MAC_INTERNALLOOP_SERDES;
st_param[1][1] = 1; /*serdes must exist*/
st_param[2][0] = MAC_INTERNALLOOP_PHY; /* only supported with a phy node */
- st_param[2][1] = ((!!(priv->ae_handle->phy_node)) &&
+ st_param[2][1] = ((!!(priv->ae_handle->phy_dev)) &&
(priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII));
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
@@ -646,8 +646,6 @@ static void hns_nic_get_drvinfo(struct net_device *net_dev,
{
struct hns_nic_priv *priv = netdev_priv(net_dev);
- assert(priv);
-
strncpy(drvinfo->version, HNAE_DRIVER_VERSION,
sizeof(drvinfo->version));
drvinfo->version[sizeof(drvinfo->version) - 1] = '\0';
@@ -720,8 +718,6 @@ static int hns_set_pauseparam(struct net_device *net_dev,
struct hnae_handle *h;
struct hnae_ae_ops *ops;
- assert(priv || priv->ae_handle);
-
h = priv->ae_handle;
ops = h->dev->ops;
@@ -763,6 +759,16 @@ static int hns_get_coalesce(struct net_device *net_dev,
&ec->tx_max_coalesced_frames,
&ec->rx_max_coalesced_frames);
+ ops->get_coalesce_range(priv->ae_handle,
+ &ec->tx_max_coalesced_frames_low,
+ &ec->rx_max_coalesced_frames_low,
+ &ec->tx_max_coalesced_frames_high,
+ &ec->rx_max_coalesced_frames_high,
+ &ec->tx_coalesce_usecs_low,
+ &ec->rx_coalesce_usecs_low,
+ &ec->tx_coalesce_usecs_high,
+ &ec->rx_coalesce_usecs_high);
+
return 0;
}
@@ -780,8 +786,6 @@ static int hns_set_coalesce(struct net_device *net_dev,
struct hnae_ae_ops *ops;
int ret;
- assert(priv || priv->ae_handle);
-
ops = priv->ae_handle->dev->ops;
if (ec->tx_coalesce_usecs != ec->rx_coalesce_usecs)
@@ -1111,8 +1115,6 @@ void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd,
struct hns_nic_priv *priv = netdev_priv(net_dev);
struct hnae_ae_ops *ops;
- assert(priv || priv->ae_handle);
-
ops = priv->ae_handle->dev->ops;
cmd->version = HNS_CHIP_VERSION;
@@ -1135,8 +1137,6 @@ static int hns_get_regs_len(struct net_device *net_dev)
struct hns_nic_priv *priv = netdev_priv(net_dev);
struct hnae_ae_ops *ops;
- assert(priv || priv->ae_handle);
-
ops = priv->ae_handle->dev->ops;
if (!ops->get_regs_len) {
netdev_err(net_dev, "ops->get_regs_len is null!\n");
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index 765ddb3dcd1a..33f4c483af0f 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -7,6 +7,7 @@
* (at your option) any later version.
*/
+#include <linux/acpi.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
@@ -36,9 +37,19 @@
#define MDIO_TIMEOUT 1000000
+struct hns_mdio_sc_reg {
+ u16 mdio_clk_en;
+ u16 mdio_clk_dis;
+ u16 mdio_reset_req;
+ u16 mdio_reset_dreq;
+ u16 mdio_clk_st;
+ u16 mdio_reset_st;
+};
+
struct hns_mdio_device {
void *vbase; /* mdio reg base address */
struct regmap *subctrl_vbase;
+ struct hns_mdio_sc_reg sc_reg;
};
/* mdio reg */
@@ -92,7 +103,6 @@ enum mdio_c45_op_seq {
#define MDIO_SC_CLK_DIS 0x33C
#define MDIO_SC_RESET_REQ 0xA38
#define MDIO_SC_RESET_DREQ 0xA3C
-#define MDIO_SC_CTRL 0x2010
#define MDIO_SC_CLK_ST 0x531C
#define MDIO_SC_RESET_ST 0x5A1C
@@ -352,69 +362,68 @@ static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
static int hns_mdio_reset(struct mii_bus *bus)
{
struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
+ const struct hns_mdio_sc_reg *sc_reg;
int ret;
- if (!mdio_dev->subctrl_vbase) {
- dev_err(&bus->dev, "mdio sys ctl reg has not maped\n");
- return -ENODEV;
- }
-
- /*1. reset req, and read reset st check*/
- ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_RESET_REQ, 0x1,
- MDIO_SC_RESET_ST, 0x1,
- MDIO_CHECK_SET_ST);
- if (ret) {
- dev_err(&bus->dev, "MDIO reset fail\n");
- return ret;
- }
+ if (dev_of_node(bus->parent)) {
+ if (!mdio_dev->subctrl_vbase) {
+ dev_err(&bus->dev, "mdio sys ctl reg has not maped\n");
+ return -ENODEV;
+ }
- /*2. dis clk, and read clk st check*/
- ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_CLK_DIS,
- 0x1, MDIO_SC_CLK_ST, 0x1,
- MDIO_CHECK_CLR_ST);
- if (ret) {
- dev_err(&bus->dev, "MDIO dis clk fail\n");
- return ret;
- }
+ sc_reg = &mdio_dev->sc_reg;
+ /* 1. reset req, and read reset st check */
+ ret = mdio_sc_cfg_reg_write(mdio_dev, sc_reg->mdio_reset_req,
+ 0x1, sc_reg->mdio_reset_st, 0x1,
+ MDIO_CHECK_SET_ST);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO reset fail\n");
+ return ret;
+ }
- /*3. reset dreq, and read reset st check*/
- ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_RESET_DREQ, 0x1,
- MDIO_SC_RESET_ST, 0x1,
- MDIO_CHECK_CLR_ST);
- if (ret) {
- dev_err(&bus->dev, "MDIO dis clk fail\n");
- return ret;
- }
+ /* 2. dis clk, and read clk st check */
+ ret = mdio_sc_cfg_reg_write(mdio_dev, sc_reg->mdio_clk_dis,
+ 0x1, sc_reg->mdio_clk_st, 0x1,
+ MDIO_CHECK_CLR_ST);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO dis clk fail\n");
+ return ret;
+ }
- /*4. en clk, and read clk st check*/
- ret = mdio_sc_cfg_reg_write(mdio_dev, MDIO_SC_CLK_EN,
- 0x1, MDIO_SC_CLK_ST, 0x1,
- MDIO_CHECK_SET_ST);
- if (ret)
- dev_err(&bus->dev, "MDIO en clk fail\n");
+ /* 3. reset dreq, and read reset st check */
+ ret = mdio_sc_cfg_reg_write(mdio_dev, sc_reg->mdio_reset_dreq,
+ 0x1, sc_reg->mdio_reset_st, 0x1,
+ MDIO_CHECK_CLR_ST);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO dis clk fail\n");
+ return ret;
+ }
+ /* 4. en clk, and read clk st check */
+ ret = mdio_sc_cfg_reg_write(mdio_dev, sc_reg->mdio_clk_en,
+ 0x1, sc_reg->mdio_clk_st, 0x1,
+ MDIO_CHECK_SET_ST);
+ if (ret)
+ dev_err(&bus->dev, "MDIO en clk fail\n");
+ } else if (is_acpi_node(bus->parent->fwnode)) {
+ acpi_status s;
+
+ s = acpi_evaluate_object(ACPI_HANDLE(bus->parent),
+ "_RST", NULL, NULL);
+ if (ACPI_FAILURE(s)) {
+ dev_err(&bus->dev, "Reset failed, return:%#x\n", s);
+ ret = -EBUSY;
+ } else {
+ ret = 0;
+ }
+ } else {
+ dev_err(&bus->dev, "Can not get cfg data from DT or ACPI\n");
+ ret = -ENXIO;
+ }
return ret;
}
/**
- * hns_mdio_bus_name - get mdio bus name
- * @name: mdio bus name
- * @np: mdio device node pointer
- */
-static void hns_mdio_bus_name(char *name, struct device_node *np)
-{
- const u32 *addr;
- u64 taddr = OF_BAD_ADDR;
-
- addr = of_get_address(np, 0, NULL, NULL);
- if (addr)
- taddr = of_translate_address(np, addr);
-
- snprintf(name, MII_BUS_ID_SIZE, "%s@%llx", np->name,
- (unsigned long long)taddr);
-}
-
-/**
* hns_mdio_probe - probe mdio device
* @pdev: mdio platform device
*
@@ -422,17 +431,16 @@ static void hns_mdio_bus_name(char *name, struct device_node *np)
*/
static int hns_mdio_probe(struct platform_device *pdev)
{
- struct device_node *np;
struct hns_mdio_device *mdio_dev;
struct mii_bus *new_bus;
struct resource *res;
- int ret;
+ int ret = -ENODEV;
if (!pdev) {
dev_err(NULL, "pdev is NULL!\r\n");
return -ENODEV;
}
- np = pdev->dev.of_node;
+
mdio_dev = devm_kzalloc(&pdev->dev, sizeof(*mdio_dev), GFP_KERNEL);
if (!mdio_dev)
return -ENOMEM;
@@ -448,7 +456,7 @@ static int hns_mdio_probe(struct platform_device *pdev)
new_bus->write = hns_mdio_write;
new_bus->reset = hns_mdio_reset;
new_bus->priv = mdio_dev;
- hns_mdio_bus_name(new_bus->id, np);
+ new_bus->parent = &pdev->dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mdio_dev->vbase = devm_ioremap_resource(&pdev->dev, res);
@@ -457,16 +465,73 @@ static int hns_mdio_probe(struct platform_device *pdev)
return ret;
}
- mdio_dev->subctrl_vbase =
- syscon_node_to_regmap(of_parse_phandle(np, "subctrl-vbase", 0));
- if (IS_ERR(mdio_dev->subctrl_vbase)) {
- dev_warn(&pdev->dev, "no syscon hisilicon,peri-c-subctrl\n");
- mdio_dev->subctrl_vbase = NULL;
- }
- new_bus->parent = &pdev->dev;
platform_set_drvdata(pdev, new_bus);
+ snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%s", "Mii",
+ dev_name(&pdev->dev));
+ if (dev_of_node(&pdev->dev)) {
+ struct of_phandle_args reg_args;
+
+ ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
+ "subctrl-vbase",
+ 4,
+ 0,
+ &reg_args);
+ if (!ret) {
+ mdio_dev->subctrl_vbase =
+ syscon_node_to_regmap(reg_args.np);
+ if (IS_ERR(mdio_dev->subctrl_vbase)) {
+ dev_warn(&pdev->dev, "syscon_node_to_regmap error\n");
+ mdio_dev->subctrl_vbase = NULL;
+ } else {
+ if (reg_args.args_count == 4) {
+ mdio_dev->sc_reg.mdio_clk_en =
+ (u16)reg_args.args[0];
+ mdio_dev->sc_reg.mdio_clk_dis =
+ (u16)reg_args.args[0] + 4;
+ mdio_dev->sc_reg.mdio_reset_req =
+ (u16)reg_args.args[1];
+ mdio_dev->sc_reg.mdio_reset_dreq =
+ (u16)reg_args.args[1] + 4;
+ mdio_dev->sc_reg.mdio_clk_st =
+ (u16)reg_args.args[2];
+ mdio_dev->sc_reg.mdio_reset_st =
+ (u16)reg_args.args[3];
+ } else {
+ /* for backwards compatibility */
+ mdio_dev->sc_reg.mdio_clk_en =
+ MDIO_SC_CLK_EN;
+ mdio_dev->sc_reg.mdio_clk_dis =
+ MDIO_SC_CLK_DIS;
+ mdio_dev->sc_reg.mdio_reset_req =
+ MDIO_SC_RESET_REQ;
+ mdio_dev->sc_reg.mdio_reset_dreq =
+ MDIO_SC_RESET_DREQ;
+ mdio_dev->sc_reg.mdio_clk_st =
+ MDIO_SC_CLK_ST;
+ mdio_dev->sc_reg.mdio_reset_st =
+ MDIO_SC_RESET_ST;
+ }
+ }
+ } else {
+ dev_warn(&pdev->dev, "find syscon ret = %#x\n", ret);
+ mdio_dev->subctrl_vbase = NULL;
+ }
+
+ ret = of_mdiobus_register(new_bus, pdev->dev.of_node);
+ } else if (is_acpi_node(pdev->dev.fwnode)) {
+ /* Clear all the IRQ properties */
+ memset(new_bus->irq, PHY_POLL, 4 * PHY_MAX_ADDR);
+
+ /* Mask out all PHYs from auto probing. */
+ new_bus->phy_mask = ~0;
+
+ /* Register the MDIO bus */
+ ret = mdiobus_register(new_bus);
+ } else {
+ dev_err(&pdev->dev, "Can not get cfg data from DT or ACPI\n");
+ ret = -ENXIO;
+ }
- ret = of_mdiobus_register(new_bus, np);
if (ret) {
dev_err(&pdev->dev, "Cannot register as MDIO bus!\n");
platform_set_drvdata(pdev, NULL);
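The "subctrl-vbase" phandle now carries four cells locating the clock/reset control registers; clk-dis and reset-dreq are derived as +4 from the first two. A parsing sketch (the example offsets are the legacy defaults from this file):

    struct of_phandle_args args;
    int err;

    err = of_parse_phandle_with_fixed_args(np, "subctrl-vbase", 4, 0, &args);
    if (!err) {
            /* args.np is the syscon node; args.args[0..3] would be e.g.
             * <0x338 0xa38 0x531c 0x5a1c>: clk-en, reset-req, clk-st, reset-st */
    }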
@@ -499,12 +564,19 @@ static const struct of_device_id hns_mdio_match[] = {
{}
};
+static const struct acpi_device_id hns_mdio_acpi_match[] = {
+ { "HISI0141", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, hns_mdio_acpi_match);
+
static struct platform_driver hns_mdio_driver = {
.probe = hns_mdio_probe,
.remove = hns_mdio_remove,
.driver = {
.name = MDIO_DRV_NAME,
.of_match_table = hns_mdio_match,
+ .acpi_match_table = ACPI_PTR(hns_mdio_acpi_match),
},
};
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index 3daf2d4a7ca0..631dbc7b4dbb 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -1102,7 +1102,7 @@ static int hp100_open(struct net_device *dev)
return -EAGAIN;
}
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_start_queue(dev);
lp->lan_type = hp100_sense_lan(dev);
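netif_trans_update() is the multiqueue-safe replacement for writing dev->trans_start by hand; in netdevice.h of this era it is roughly:

    static inline void netif_trans_update(struct net_device *dev)
    {
            struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

            if (txq->trans_start != jiffies)
                    txq->trans_start = jiffies; /* avoid dirtying a hot cacheline */
    }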
diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c
index 7ce6379fd1a3..befb4ac3e2b0 100644
--- a/drivers/net/ethernet/i825xx/82596.c
+++ b/drivers/net/ethernet/i825xx/82596.c
@@ -1042,7 +1042,7 @@ static void i596_tx_timeout (struct net_device *dev)
lp->last_restart = dev->stats.tx_packets;
}
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue (dev);
}
diff --git a/drivers/net/ethernet/i825xx/lib82596.c b/drivers/net/ethernet/i825xx/lib82596.c
index c984998b34a0..3dbc53c21baa 100644
--- a/drivers/net/ethernet/i825xx/lib82596.c
+++ b/drivers/net/ethernet/i825xx/lib82596.c
@@ -960,7 +960,7 @@ static void i596_tx_timeout (struct net_device *dev)
lp->last_restart = dev->stats.tx_packets;
}
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue (dev);
}
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
index 353f57f675d0..21c84cc9c871 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.c
+++ b/drivers/net/ethernet/i825xx/sun3_82586.c
@@ -983,7 +983,7 @@ static void sun3_82586_timeout(struct net_device *dev)
p->scb->cmd_cuc = CUC_START;
sun3_attn586();
WAIT_4_SCB_CMD();
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
return 0;
}
#endif
@@ -996,7 +996,7 @@ static void sun3_82586_timeout(struct net_device *dev)
sun3_82586_close(dev);
sun3_82586_open(dev);
}
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
}
/******************************************************
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 2a0dc127df3f..54efa9a5167b 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1169,16 +1169,15 @@ static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
port = ehea_get_port(adapter, portnum);
+ if (!port) {
+ netdev_err(NULL, "unknown portnum %x\n", portnum);
+ return;
+ }
dev = port->netdev;
switch (ec) {
case EHEA_EC_PORTSTATE_CHG: /* port state change */
- if (!port) {
- netdev_err(dev, "unknown portnum %x\n", portnum);
- break;
- }
-
if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
if (!netif_carrier_ok(dev)) {
ret = ehea_sense_port_attr(port);
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 5d7db6c01c46..7af09cbc53f0 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -301,7 +301,7 @@ static inline void emac_netif_stop(struct emac_instance *dev)
dev->no_mcast = 1;
netif_addr_unlock(dev->ndev);
netif_tx_unlock_bh(dev->ndev);
- dev->ndev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev->ndev); /* prevent tx timeout */
mal_poll_disable(dev->mal, &dev->commac);
netif_tx_disable(dev->ndev);
}
@@ -977,7 +977,37 @@ static void emac_set_multicast_list(struct net_device *ndev)
dev->mcast_pending = 1;
return;
}
+
+ mutex_lock(&dev->link_lock);
__emac_set_multicast_list(dev);
+ mutex_unlock(&dev->link_lock);
+}
+
+static int emac_set_mac_address(struct net_device *ndev, void *sa)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+ struct sockaddr *addr = sa;
+ struct emac_regs __iomem *p = dev->emacp;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ mutex_lock(&dev->link_lock);
+
+ memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+
+ emac_rx_disable(dev);
+ emac_tx_disable(dev);
+ out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
+ out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
+ (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
+ ndev->dev_addr[5]);
+ emac_tx_enable(dev);
+ emac_rx_enable(dev);
+
+ mutex_unlock(&dev->link_lock);
+
+ return 0;
}
static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
@@ -1377,7 +1407,7 @@ static inline int emac_xmit_finish(struct emac_instance *dev, int len)
DBG2(dev, "stopped TX queue" NL);
}
- ndev->trans_start = jiffies;
+ netif_trans_update(ndev);
++dev->stats.tx_packets;
dev->stats.tx_bytes += len;
@@ -2686,7 +2716,7 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_do_ioctl = emac_ioctl,
.ndo_tx_timeout = emac_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = emac_set_mac_address,
.ndo_start_xmit = emac_start_xmit,
.ndo_change_mtu = eth_change_mtu,
};
@@ -2699,7 +2729,7 @@ static const struct net_device_ops emac_gige_netdev_ops = {
.ndo_do_ioctl = emac_ioctl,
.ndo_tx_timeout = emac_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = emac_set_mac_address,
.ndo_start_xmit = emac_start_xmit_sg,
.ndo_change_mtu = emac_change_mtu,
};
diff --git a/drivers/net/ethernet/ibm/emac/phy.c b/drivers/net/ethernet/ibm/emac/phy.c
index d3b9d103353e..5b88cc690c22 100644
--- a/drivers/net/ethernet/ibm/emac/phy.c
+++ b/drivers/net/ethernet/ibm/emac/phy.c
@@ -470,12 +470,38 @@ static struct mii_phy_def m88e1112_phy_def = {
.ops = &m88e1112_phy_ops,
};
+static int ar8035_init(struct mii_phy *phy)
+{
+ phy_write(phy, 0x1d, 0x5); /* Address debug register 5 */
+ phy_write(phy, 0x1e, 0x2d47); /* Value copied from u-boot */
+ phy_write(phy, 0x1d, 0xb); /* Address hib ctrl */
+ phy_write(phy, 0x1e, 0xbc20); /* Value copied from u-boot */
+
+ return 0;
+}
+
+static struct mii_phy_ops ar8035_phy_ops = {
+ .init = ar8035_init,
+ .setup_aneg = genmii_setup_aneg,
+ .setup_forced = genmii_setup_forced,
+ .poll_link = genmii_poll_link,
+ .read_link = genmii_read_link,
+};
+
+static struct mii_phy_def ar8035_phy_def = {
+ .phy_id = 0x004dd070,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Atheros 8035 Gigabit Ethernet",
+ .ops = &ar8035_phy_ops,
+};
+
static struct mii_phy_def *mii_phy_table[] = {
&et1011c_phy_def,
&cis8201_phy_def,
&bcm5248_phy_def,
&m88e1111_phy_def,
&m88e1112_phy_def,
+ &ar8035_phy_def,
&genmii_phy_def,
NULL
};
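ar8035_init() uses the Atheros debug-port convention: the debug register address is staged through MII register 0x1d and its value moved through 0x1e. The generic shape, using this file's local phy_read()/phy_write() helpers (the read-modify-write is illustrative):

    int val;

    phy_write(phy, 0x1d, 0x05);           /* stage debug register 5's address */
    val = phy_read(phy, 0x1e);            /* read it back through the data reg */
    phy_write(phy, 0x1e, val | 0x0100);   /* illustrative read-modify-write */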
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 6e9e16eee5d0..88f3c85fb04a 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -61,6 +61,7 @@
#include <linux/proc_fs.h>
#include <linux/in.h>
#include <linux/ip.h>
+#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
@@ -74,6 +75,7 @@
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/seq_file.h>
+#include <linux/workqueue.h>
#include "ibmvnic.h"
@@ -88,12 +90,14 @@ MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *);
+static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
union sub_crq *sub_crq);
+static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
struct ibmvnic_sub_crq_queue *);
@@ -467,7 +471,8 @@ static int ibmvnic_open(struct net_device *netdev)
crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
ibmvnic_send_crq(adapter, &crq);
- netif_start_queue(netdev);
+ netif_tx_start_all_queues(netdev);
+
return 0;
bounce_map_failed:
@@ -517,7 +522,7 @@ static int ibmvnic_close(struct net_device *netdev)
for (i = 0; i < adapter->req_rx_queues; i++)
napi_disable(&adapter->napi[i]);
- netif_stop_queue(netdev);
+ netif_tx_stop_all_queues(netdev);
if (adapter->bounce_buffer) {
if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
@@ -561,10 +566,141 @@ static int ibmvnic_close(struct net_device *netdev)
return 0;
}
+/**
+ * build_hdr_data - creates L2/L3/L4 header data buffer
+ * @hdr_field - bitfield determining needed headers
+ * @skb - socket buffer
+ * @hdr_len - array of header lengths
+ * @tot_len - total length of data
+ *
+ * Reads hdr_field to determine which headers are needed by firmware.
+ * Builds a buffer containing these headers. Saves individual header
+ * lengths and total buffer length to be used to build descriptors.
+ */
+static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
+ int *hdr_len, u8 *hdr_data)
+{
+ int len = 0;
+ u8 *hdr;
+
+ hdr_len[0] = sizeof(struct ethhdr);
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ hdr_len[1] = ip_hdr(skb)->ihl * 4;
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ hdr_len[2] = tcp_hdrlen(skb);
+ else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
+ hdr_len[2] = sizeof(struct udphdr);
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ hdr_len[1] = sizeof(struct ipv6hdr);
+ if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+ hdr_len[2] = tcp_hdrlen(skb);
+ else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
+ hdr_len[2] = sizeof(struct udphdr);
+ }
+
+ memset(hdr_data, 0, 120);
+ if ((hdr_field >> 6) & 1) {
+ hdr = skb_mac_header(skb);
+ memcpy(hdr_data, hdr, hdr_len[0]);
+ len += hdr_len[0];
+ }
+
+ if ((hdr_field >> 5) & 1) {
+ hdr = skb_network_header(skb);
+ memcpy(hdr_data + len, hdr, hdr_len[1]);
+ len += hdr_len[1];
+ }
+
+ if ((hdr_field >> 4) & 1) {
+ hdr = skb_transport_header(skb);
+ memcpy(hdr_data + len, hdr, hdr_len[2]);
+ len += hdr_len[2];
+ }
+ return len;
+}
+
+/**
+ * create_hdr_descs - create header and header extension descriptors
+ * @hdr_field - bitfield determining needed headers
+ * @data - buffer containing header data
+ * @len - length of data buffer
+ * @hdr_len - array of individual header lengths
+ * @scrq_arr - descriptor array
+ *
+ * Creates header and, if needed, header extension descriptors and
+ * places them in a descriptor array, scrq_arr
+ */
+
+static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
+ union sub_crq *scrq_arr)
+{
+ union sub_crq hdr_desc;
+ int tmp_len = len;
+ u8 *data, *cur;
+ int tmp;
+
+ while (tmp_len > 0) {
+ cur = hdr_data + len - tmp_len;
+
+ memset(&hdr_desc, 0, sizeof(hdr_desc));
+ if (cur != hdr_data) {
+ data = hdr_desc.hdr_ext.data;
+ tmp = tmp_len > 29 ? 29 : tmp_len;
+ hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
+ hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
+ hdr_desc.hdr_ext.len = tmp;
+ } else {
+ data = hdr_desc.hdr.data;
+ tmp = tmp_len > 24 ? 24 : tmp_len;
+ hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
+ hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
+ hdr_desc.hdr.len = tmp;
+ hdr_desc.hdr.l2_len = (u8)hdr_len[0];
+ hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
+ hdr_desc.hdr.l4_len = (u8)hdr_len[2];
+ hdr_desc.hdr.flag = hdr_field << 1;
+ }
+ memcpy(data, cur, tmp);
+ tmp_len -= tmp;
+ *scrq_arr = hdr_desc;
+ scrq_arr++;
+ }
+}
+
+/**
+ * build_hdr_descs_arr - build a header descriptor array
+ * @skb - socket buffer
+ * @num_entries - number of descriptors to be sent
+ * @subcrq - first TX descriptor
+ * @hdr_field - bit field determining which headers will be sent
+ *
+ * This function will build a TX descriptor array with applicable
+ * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
+ */
+
+static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
+ int *num_entries, u8 hdr_field)
+{
+ int hdr_len[3] = {0, 0, 0};
+ int tot_len, len;
+ u8 *hdr_data = txbuff->hdr_data;
+
+ tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
+ txbuff->hdr_data);
+ len = tot_len;
+ len -= 24;
+ if (len > 0)
+ *num_entries += len % 29 ? len / 29 + 1 : len / 29;
+ create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
+ txbuff->indir_arr + 1);
+}
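A worked example of the descriptor math above, with illustrative header sizes:

    /* e.g. eth(14) + ipv4(20) + tcp(20) = 54 bytes of header data;
     * 54 - 24 = 30 bytes spill past the header descriptor, and
     * 30 % 29 != 0  =>  30 / 29 + 1 = 2 extension descriptors
     * are added to *num_entries (on top of the leading TX descriptor). */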
+
static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
int queue_num = skb_get_queue_mapping(skb);
+ u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_tx_buff *tx_buff = NULL;
struct ibmvnic_tx_pool *tx_pool;
@@ -579,6 +715,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
unsigned long lpar_rc;
union sub_crq tx_crq;
unsigned int offset;
+ int num_entries = 1;
unsigned char *dst;
u64 *handle_array;
int index = 0;
@@ -644,11 +781,35 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
}
- if (skb->ip_summed == CHECKSUM_PARTIAL)
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;
-
- lpar_rc = send_subcrq(adapter, handle_array[0], &tx_crq);
-
+ hdrs += 2;
+ }
+ /* determine if l2/3/4 headers are sent to firmware */
+ if ((*hdrs >> 7) & 1 &&
+ (skb->protocol == htons(ETH_P_IP) ||
+ skb->protocol == htons(ETH_P_IPV6))) {
+ build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
+ tx_crq.v1.n_crq_elem = num_entries;
+ tx_buff->indir_arr[0] = tx_crq;
+ tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
+ sizeof(tx_buff->indir_arr),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, tx_buff->indir_dma)) {
+ if (!firmware_has_feature(FW_FEATURE_CMO))
+ dev_err(dev, "tx: unable to map descriptor array\n");
+ tx_map_failed++;
+ tx_dropped++;
+ ret = NETDEV_TX_BUSY;
+ goto out;
+ }
+ lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
+ (u64)tx_buff->indir_dma,
+ (u64)num_entries);
+ } else {
+ lpar_rc = send_subcrq(adapter, handle_array[queue_num],
+ &tx_crq);
+ }
if (lpar_rc != H_SUCCESS) {
dev_err(dev, "tx failed with code %ld\n", lpar_rc);
@@ -832,7 +993,7 @@ restart_poll:
netdev->stats.rx_bytes += length;
frames_processed++;
}
- replenish_pools(adapter);
+ replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);
if (frames_processed < budget) {
enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
@@ -1054,12 +1215,6 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
goto reg_failed;
}
- scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
- if (scrq->irq == NO_IRQ) {
- dev_err(dev, "Error mapping irq\n");
- goto map_irq_failed;
- }
-
scrq->adapter = adapter;
scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
scrq->cur = 0;
@@ -1072,12 +1227,6 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
return scrq;
-map_irq_failed:
- do {
- rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
- adapter->vdev->unit_address,
- scrq->crq_num);
- } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_failed:
dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
DMA_BIDIRECTIONAL);
@@ -1098,6 +1247,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
if (adapter->tx_scrq[i]) {
free_irq(adapter->tx_scrq[i]->irq,
adapter->tx_scrq[i]);
+ irq_dispose_mapping(adapter->tx_scrq[i]->irq);
release_sub_crq_queue(adapter,
adapter->tx_scrq[i]);
}
@@ -1109,6 +1259,7 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
if (adapter->rx_scrq[i]) {
free_irq(adapter->rx_scrq[i]->irq,
adapter->rx_scrq[i]);
+ irq_dispose_mapping(adapter->rx_scrq[i]->irq);
release_sub_crq_queue(adapter,
adapter->rx_scrq[i]);
}
@@ -1118,6 +1269,29 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
adapter->requested_caps = 0;
}
+static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter)
+{
+ int i;
+
+ if (adapter->tx_scrq) {
+ for (i = 0; i < adapter->req_tx_queues; i++)
+ if (adapter->tx_scrq[i])
+ release_sub_crq_queue(adapter,
+ adapter->tx_scrq[i]);
+ adapter->tx_scrq = NULL;
+ }
+
+ if (adapter->rx_scrq) {
+ for (i = 0; i < adapter->req_rx_queues; i++)
+ if (adapter->rx_scrq[i])
+ release_sub_crq_queue(adapter,
+ adapter->rx_scrq[i]);
+ adapter->rx_scrq = NULL;
+ }
+
+ adapter->requested_caps = 0;
+}
+
static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
struct ibmvnic_sub_crq_queue *scrq)
{
@@ -1159,6 +1333,7 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
union sub_crq *next;
int index;
int i, j;
+ u8 first;
restart_loop:
while (pending_scrq(adapter, scrq)) {
@@ -1181,6 +1356,13 @@ restart_loop:
txbuff->data_dma[j] = 0;
txbuff->used_bounce = false;
}
+ /* if sub_crq was sent indirectly */
+ first = txbuff->indir_arr[0].generic.first;
+ if (first == IBMVNIC_CRQ_CMD) {
+ dma_unmap_single(dev, txbuff->indir_dma,
+ sizeof(txbuff->indir_arr),
+ DMA_TO_DEVICE);
+ }
if (txbuff->last_frag)
dev_kfree_skb_any(txbuff->skb);
@@ -1229,6 +1411,66 @@ static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
return IRQ_HANDLED;
}
+static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
+{
+ struct device *dev = &adapter->vdev->dev;
+ struct ibmvnic_sub_crq_queue *scrq;
+ int i = 0, j = 0;
+ int rc = 0;
+
+ for (i = 0; i < adapter->req_tx_queues; i++) {
+ scrq = adapter->tx_scrq[i];
+ scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
+
+ if (scrq->irq == NO_IRQ) {
+ rc = -EINVAL;
+ dev_err(dev, "Error mapping irq\n");
+ goto req_tx_irq_failed;
+ }
+
+ rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
+ 0, "ibmvnic_tx", scrq);
+
+ if (rc) {
+ dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
+ scrq->irq, rc);
+ irq_dispose_mapping(scrq->irq);
+ goto req_rx_irq_failed;
+ }
+ }
+
+ for (i = 0; i < adapter->req_rx_queues; i++) {
+ scrq = adapter->rx_scrq[i];
+ scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
+ if (scrq->irq == NO_IRQ) {
+ rc = -EINVAL;
+ dev_err(dev, "Error mapping irq\n");
+ goto req_rx_irq_failed;
+ }
+ rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
+ 0, "ibmvnic_rx", scrq);
+ if (rc) {
+ dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
+ scrq->irq, rc);
+ irq_dispose_mapping(scrq->irq);
+ goto req_rx_irq_failed;
+ }
+ }
+ return rc;
+
+req_rx_irq_failed:
+ for (j = 0; j < i; j++) {
+ free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
+ irq_dispose_mapping(adapter->rx_scrq[j]->irq);
+ }
+ i = adapter->req_tx_queues;
+req_tx_irq_failed:
+ for (j = 0; j < i; j++) {
+ free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
+ irq_dispose_mapping(adapter->tx_scrq[j]->irq);
+ }
+ release_sub_crqs_no_irqs(adapter);
+ return rc;
+}
+
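init_sub_crq_irqs() pairs every irq_create_mapping() with a request_irq(), and the teardown paths mirror it with free_irq() plus irq_dispose_mapping(). The pattern in isolation, as a hedged helper:

    static int map_and_request(unsigned int hw_irq, irq_handler_t handler,
                               void *cookie)
    {
            unsigned int virq = irq_create_mapping(NULL, hw_irq);
            int rc;

            if (!virq)              /* NO_IRQ is 0 on this platform */
                    return -EINVAL;
            rc = request_irq(virq, handler, 0, "ibmvnic", cookie);
            if (rc)
                    irq_dispose_mapping(virq);  /* undo the mapping on failure */
            return rc;
    }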
static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
{
struct device *dev = &adapter->vdev->dev;
@@ -1237,8 +1479,7 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
union ibmvnic_crq crq;
int total_queues;
int more = 0;
- int i, j;
- int rc;
+ int i;
if (!retry) {
/* Sub-CRQ entries are 32 byte long */
@@ -1261,9 +1502,9 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
entries_page : adapter->max_rx_add_entries_per_subcrq;
/* Choosing the maximum number of queues supported by firmware*/
- adapter->req_tx_queues = adapter->min_tx_queues;
- adapter->req_rx_queues = adapter->min_rx_queues;
- adapter->req_rx_add_queues = adapter->min_rx_add_queues;
+ adapter->req_tx_queues = adapter->max_tx_queues;
+ adapter->req_rx_queues = adapter->max_rx_queues;
+ adapter->req_rx_add_queues = adapter->max_rx_add_queues;
adapter->req_mtu = adapter->max_mtu;
}
@@ -1317,13 +1558,6 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
for (i = 0; i < adapter->req_tx_queues; i++) {
adapter->tx_scrq[i] = allqueues[i];
adapter->tx_scrq[i]->pool_index = i;
- rc = request_irq(adapter->tx_scrq[i]->irq, ibmvnic_interrupt_tx,
- 0, "ibmvnic_tx", adapter->tx_scrq[i]);
- if (rc) {
- dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
- adapter->tx_scrq[i]->irq, rc);
- goto req_tx_irq_failed;
- }
}
adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
@@ -1334,13 +1568,6 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
for (i = 0; i < adapter->req_rx_queues; i++) {
adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
adapter->rx_scrq[i]->scrq_num = i;
- rc = request_irq(adapter->rx_scrq[i]->irq, ibmvnic_interrupt_rx,
- 0, "ibmvnic_rx", adapter->rx_scrq[i]);
- if (rc) {
- dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
- adapter->rx_scrq[i]->irq, rc);
- goto req_rx_irq_failed;
- }
}
memset(&crq, 0, sizeof(crq));
@@ -1393,15 +1620,6 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
return;
-req_rx_irq_failed:
- for (j = 0; j < i; j++)
- free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
- i = adapter->req_tx_queues;
-req_tx_irq_failed:
- for (j = 0; j < i; j++)
- free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
- kfree(adapter->rx_scrq);
- adapter->rx_scrq = NULL;
rx_failed:
kfree(adapter->tx_scrq);
adapter->tx_scrq = NULL;
@@ -1494,6 +1712,28 @@ static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
return rc;
}
+static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
+ u64 remote_handle, u64 ioba, u64 num_entries)
+{
+ unsigned int ua = adapter->vdev->unit_address;
+ struct device *dev = &adapter->vdev->dev;
+ int rc;
+
+ /* Make sure the hypervisor sees the complete request */
+ mb();
+ rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
+ cpu_to_be64(remote_handle),
+ ioba, num_entries);
+
+ if (rc) {
+ if (rc == H_CLOSED)
+ dev_warn(dev, "CRQ Queue closed\n");
+ dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
+ }
+
+ return rc;
+}
+
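The mb() is the crucial detail here: it is a full memory barrier, so every descriptor written into the indirect buffer is visible in memory before the hypervisor dereferences ioba. A hedged sketch of a caller, with the handle and entry count left as parameters (the driver obtains its queue handles from the login response; the descriptor fill itself is schematic):

	/* Illustrative only: hand a pre-built batch of sub-CRQ entries,
	 * already DMA-mapped at tx_buff->indir_dma, to the hypervisor
	 * in a single H_SEND_SUB_CRQ_INDIRECT call.
	 */
	static int submit_tx_batch(struct ibmvnic_adapter *adapter,
				   u64 remote_handle,
				   struct ibmvnic_tx_buff *tx_buff,
				   int num_entries)
	{
		return send_subcrq_indirect(adapter, remote_handle,
					    (u64)tx_buff->indir_dma,
					    (u64)num_entries);
	}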
static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
union ibmvnic_crq *crq)
{
@@ -1589,13 +1829,11 @@ static void send_login(struct ibmvnic_adapter *adapter)
goto buf_map_failed;
}
- rsp_buffer_size =
- sizeof(struct ibmvnic_login_rsp_buffer) +
- sizeof(u64) * (adapter->req_tx_queues +
- adapter->req_rx_queues *
- adapter->req_rx_add_queues + adapter->
- req_rx_add_queues) +
- sizeof(u8) * (IBMVNIC_TX_DESC_VERSIONS);
+ rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
+ sizeof(u64) * adapter->req_tx_queues +
+ sizeof(u64) * adapter->req_rx_queues +
+ sizeof(u64) * adapter->req_rx_queues +
+ sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;
login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
if (!login_rsp_buffer)
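The rewritten sum spells out the response layout: one u64 handle per TX sub-CRQ, one per RX sub-CRQ, one per RX buffer-add queue (the driver requests as many of these as RX queues, hence req_rx_queues appearing twice), plus one byte per supported TX descriptor version. A worked example with invented counts:

	/* Hypothetical numbers for illustration: 4 TX queues, 4 RX
	 * queues, and 8 in place of IBMVNIC_TX_DESC_VERSIONS.
	 */
	size_t sz = sizeof(struct ibmvnic_login_rsp_buffer) +
		    sizeof(u64) * 4 +	/* TX queue handles          */
		    sizeof(u64) * 4 +	/* RX queue handles          */
		    sizeof(u64) * 4 +	/* RX buffer-add handles     */
		    sizeof(u8) * 8;	/* TX descriptor version map */
	/* = header + 96 + 8 bytes */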
@@ -1918,6 +2156,10 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
adapter->netdev->features |= NETIF_F_IPV6_CSUM;
+ if ((adapter->netdev->features &
+ (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
+ adapter->netdev->features |= NETIF_F_RXCSUM;
+
memset(&crq, 0, sizeof(crq));
crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
@@ -1931,7 +2173,7 @@ static void handle_error_info_rsp(union ibmvnic_crq *crq,
struct ibmvnic_adapter *adapter)
{
struct device *dev = &adapter->vdev->dev;
- struct ibmvnic_error_buff *error_buff;
+ struct ibmvnic_error_buff *error_buff, *tmp;
unsigned long flags;
bool found = false;
int i;
@@ -1943,7 +2185,7 @@ static void handle_error_info_rsp(union ibmvnic_crq *crq,
}
spin_lock_irqsave(&adapter->error_list_lock, flags);
- list_for_each_entry(error_buff, &adapter->errors, list)
+ list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
if (error_buff->error_id == crq->request_error_rsp.error_id) {
found = true;
list_del(&error_buff->list);
@@ -2158,9 +2400,9 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
*req_value,
(long int)be32_to_cpu(crq->request_capability_rsp.
number), name);
- release_sub_crqs(adapter);
+ release_sub_crqs_no_irqs(adapter);
*req_value = be32_to_cpu(crq->request_capability_rsp.number);
- complete(&adapter->init_done);
+ init_sub_crqs(adapter, 1);
return;
default:
dev_err(dev, "Error %d in request cap rsp\n",
@@ -2210,6 +2452,16 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
dma_unmap_single(dev, adapter->login_rsp_buf_token,
adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);
+ /* If the number of queues requested can't be allocated by the
+ * server, the login response will return with code 1. We will need
+ * to resend the login buffer with fewer queues requested.
+ */
+ if (login_rsp_crq->generic.rc.code) {
+ adapter->renegotiate = true;
+ complete(&adapter->init_done);
+ return 0;
+ }
+
netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
netdev_dbg(adapter->netdev, "%016lx\n",
@@ -2459,7 +2711,7 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
out:
if (atomic_read(&adapter->running_cap_queries) == 0)
- complete(&adapter->init_done);
+ init_sub_crqs(adapter, 0);
/* We're done querying the capabilities, initialize sub-crqs */
}
@@ -2941,14 +3193,14 @@ static void handle_request_ras_comp_num_rsp(union ibmvnic_crq *crq,
static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
{
- struct ibmvnic_inflight_cmd *inflight_cmd;
+ struct ibmvnic_inflight_cmd *inflight_cmd, *tmp1;
struct device *dev = &adapter->vdev->dev;
- struct ibmvnic_error_buff *error_buff;
+ struct ibmvnic_error_buff *error_buff, *tmp2;
unsigned long flags;
unsigned long flags2;
spin_lock_irqsave(&adapter->inflight_lock, flags);
- list_for_each_entry(inflight_cmd, &adapter->inflight, list) {
+ list_for_each_entry_safe(inflight_cmd, tmp1, &adapter->inflight, list) {
switch (inflight_cmd->crq.generic.cmd) {
case LOGIN:
dma_unmap_single(dev, adapter->login_buf_token,
@@ -2965,8 +3217,8 @@ static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
break;
case REQUEST_ERROR_INFO:
spin_lock_irqsave(&adapter->error_list_lock, flags2);
- list_for_each_entry(error_buff, &adapter->errors,
- list) {
+ list_for_each_entry_safe(error_buff, tmp2,
+ &adapter->errors, list) {
dma_unmap_single(dev, error_buff->dma,
error_buff->len,
DMA_FROM_DEVICE);
@@ -3002,8 +3254,8 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
dev_info(dev, "Partner initialized\n");
/* Send back a response */
rc = ibmvnic_send_crq_init_complete(adapter);
- if (rc == 0)
- send_version_xchg(adapter);
+ if (!rc)
+ schedule_work(&adapter->vnic_crq_init);
else
dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
break;
@@ -3355,8 +3607,63 @@ static const struct file_operations ibmvnic_dump_ops = {
.release = single_release,
};
+static void handle_crq_init_rsp(struct work_struct *work)
+{
+ struct ibmvnic_adapter *adapter = container_of(work,
+ struct ibmvnic_adapter,
+ vnic_crq_init);
+ struct device *dev = &adapter->vdev->dev;
+ struct net_device *netdev = adapter->netdev;
+ unsigned long timeout = msecs_to_jiffies(30000);
+ int rc;
+
+ send_version_xchg(adapter);
+ reinit_completion(&adapter->init_done);
+ if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
+ dev_err(dev, "Passive init timeout\n");
+ goto task_failed;
+ }
+
+ do {
+ if (adapter->renegotiate) {
+ adapter->renegotiate = false;
+ release_sub_crqs_no_irqs(adapter);
+ send_cap_queries(adapter);
+
+ reinit_completion(&adapter->init_done);
+ if (!wait_for_completion_timeout(&adapter->init_done,
+ timeout)) {
+ dev_err(dev, "Passive init timeout\n");
+ goto task_failed;
+ }
+ }
+ } while (adapter->renegotiate);
+ rc = init_sub_crq_irqs(adapter);
+
+ if (rc)
+ goto task_failed;
+
+ netdev->real_num_tx_queues = adapter->req_tx_queues;
+
+ rc = register_netdev(netdev);
+ if (rc) {
+ dev_err(dev,
+ "failed to register netdev rc=%d\n", rc);
+ goto register_failed;
+ }
+ dev_info(dev, "ibmvnic registered\n");
+
+ return;
+
+register_failed:
+ release_sub_crqs(adapter);
+task_failed:
+ dev_err(dev, "Passive initialization was not successful\n");
+}
+
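ibmvnic_probe() below runs the same wait-and-renegotiate loop as this handler. The shared shape, factored into a hypothetical helper for clarity (the patch open-codes it at both sites):

	/* Hedged sketch, not in the patch: the loop both call sites
	 * share. Returns 0 once capabilities are settled, or
	 * -ETIMEDOUT if the server stops responding.
	 */
	static int ibmvnic_settle_caps(struct ibmvnic_adapter *adapter,
				       unsigned long timeout)
	{
		do {
			if (adapter->renegotiate) {
				adapter->renegotiate = false;
				release_sub_crqs_no_irqs(adapter);
				send_cap_queries(adapter);

				reinit_completion(&adapter->init_done);
				if (!wait_for_completion_timeout(&adapter->init_done,
								 timeout))
					return -ETIMEDOUT;
			}
		} while (adapter->renegotiate);

		return 0;
	}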
static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
+ unsigned long timeout = msecs_to_jiffies(30000);
struct ibmvnic_adapter *adapter;
struct net_device *netdev;
unsigned char *mac_addr_p;
@@ -3393,6 +3700,8 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
netdev->ethtool_ops = &ibmvnic_ethtool_ops;
SET_NETDEV_DEV(netdev, &dev->dev);
+ INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);
+
spin_lock_init(&adapter->stats_lock);
rc = ibmvnic_init_crq_queue(adapter);
@@ -3435,23 +3744,26 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
ibmvnic_send_crq_init(adapter);
init_completion(&adapter->init_done);
- wait_for_completion(&adapter->init_done);
-
- /* needed to pull init_sub_crqs outside of an interrupt context
- * because it creates IRQ mappings for the subCRQ queues, causing
- * a kernel warning
- */
- init_sub_crqs(adapter, 0);
-
- reinit_completion(&adapter->init_done);
- wait_for_completion(&adapter->init_done);
+ if (!wait_for_completion_timeout(&adapter->init_done, timeout))
+ return 0;
- /* if init_sub_crqs is partially successful, retry */
- while (!adapter->tx_scrq || !adapter->rx_scrq) {
- init_sub_crqs(adapter, 1);
+ do {
+ if (adapter->renegotiate) {
+ adapter->renegotiate = false;
+ release_sub_crqs_no_irqs(adapter);
+ send_cap_queries(adapter);
+
+ reinit_completion(&adapter->init_done);
+ if (!wait_for_completion_timeout(&adapter->init_done,
+ timeout))
+ return 0;
+ }
+ } while (adapter->renegotiate);
- reinit_completion(&adapter->init_done);
- wait_for_completion(&adapter->init_done);
+ rc = init_sub_crq_irqs(adapter);
+ if (rc) {
+ dev_err(&dev->dev, "failed to initialize sub crq irqs\n");
+ goto free_debugfs;
}
netdev->real_num_tx_queues = adapter->req_tx_queues;
@@ -3459,12 +3771,14 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
rc = register_netdev(netdev);
if (rc) {
dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
- goto free_debugfs;
+ goto free_sub_crqs;
}
dev_info(&dev->dev, "ibmvnic registered\n");
return 0;
+free_sub_crqs:
+ release_sub_crqs(adapter);
free_debugfs:
if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
debugfs_remove_recursive(adapter->debugfs_dir);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 1a9993cc79b5..e82898fd518e 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -879,6 +879,9 @@ struct ibmvnic_tx_buff {
int pool_index;
bool last_frag;
bool used_bounce;
+ union sub_crq indir_arr[6];
+ u8 hdr_data[140];
+ dma_addr_t indir_dma;
};
struct ibmvnic_tx_pool {
@@ -977,6 +980,7 @@ struct ibmvnic_adapter {
struct ibmvnic_sub_crq_queue **tx_scrq;
struct ibmvnic_sub_crq_queue **rx_scrq;
int requested_caps;
+ bool renegotiate;
/* rx structs */
struct napi_struct *napi;
@@ -1041,4 +1045,6 @@ struct ibmvnic_adapter {
u64 opt_rxba_entries_per_subcrq;
__be64 tx_rx_desc_req;
u8 map_id;
+
+ struct work_struct vnic_crq_init;
};
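The new vnic_crq_init work item is what moves passive initialization out of interrupt context: ibmvnic_handle_crq() runs from the CRQ interrupt, where irq_create_mapping() and register_netdev() may sleep and therefore cannot be called, so it only schedules the work and handle_crq_init_rsp() does the heavy lifting later in process context. The pattern in isolation (a schematic reduction of the patch, with a stub body):

	#include <linux/workqueue.h>

	/* At probe time:   INIT_WORK(&adapter->vnic_crq_init, crq_init_work);
	 * In the IRQ path: schedule_work(&adapter->vnic_crq_init);
	 */
	static void crq_init_work(struct work_struct *work)
	{
		struct ibmvnic_adapter *adapter =
			container_of(work, struct ibmvnic_adapter,
				     vnic_crq_init);

		/* process context: may sleep, map IRQs, register netdevs */
		(void)adapter;
	}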
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 3772f3ac956e..c0e17433f623 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -25,16 +25,13 @@ config E100
on the adapter. Look for a label that has a barcode and a number
in the format 123456-001 (six digits hyphen three digits).
- Use the above information and the Adapter & Driver ID Guide at:
+ Use the above information and the Adapter & Driver ID Guide that
+ can be located at:
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+ <http://support.intel.com>
to identify the adapter.
- For the latest Intel PRO/100 network driver for Linux, see:
-
- <http://www.intel.com/p/en_US/support/highlights/network/pro100plus>
-
More specific information on configuring the driver is in
<file:Documentation/networking/e100.txt>.
@@ -47,12 +44,7 @@ config E1000
---help---
This driver supports Intel(R) PRO/1000 gigabit ethernet family of
adapters. For more information on how to identify your adapter, go
- to the Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
- For general information and support, go to the Intel support
- website at:
+ to the Adapter & Driver ID Guide that can be located at:
<http://support.intel.com>
@@ -71,12 +63,8 @@ config E1000E
This driver supports the PCI-Express Intel(R) PRO/1000 gigabit
ethernet family of adapters. For PCI or PCI-X e1000 adapters,
	  use the regular e1000 driver. For more information on how to
- identify your adapter, go to the Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
- For general information and support, go to the Intel support
- website at:
+ identify your adapter, go to the Adapter & Driver ID Guide that
+ can be located at:
<http://support.intel.com>
@@ -101,12 +89,7 @@ config IGB
---help---
This driver supports Intel(R) 82575/82576 gigabit ethernet family of
adapters. For more information on how to identify your adapter, go
- to the Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
- For general information and support, go to the Intel support
- website at:
+ to the Adapter & Driver ID Guide that can be located at:
<http://support.intel.com>
@@ -142,12 +125,7 @@ config IGBVF
---help---
This driver supports Intel(R) 82576 virtual functions. For more
information on how to identify your adapter, go to the Adapter &
- Driver ID Guide at:
-
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
- For general information and support, go to the Intel support
- website at:
+ Driver ID Guide that can be located at:
<http://support.intel.com>
@@ -164,12 +142,7 @@ config IXGB
This driver supports Intel(R) PRO/10GbE family of adapters for
PCI-X type cards. For PCI-E type cards, use the "ixgbe" driver
instead. For more information on how to identify your adapter, go
- to the Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
- For general information and support, go to the Intel support
- website at:
+ to the Adapter & Driver ID Guide that can be located at:
<http://support.intel.com>
@@ -187,29 +160,13 @@ config IXGBE
---help---
This driver supports Intel(R) 10GbE PCI Express family of
adapters. For more information on how to identify your adapter, go
- to the Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
- For general information and support, go to the Intel support
- website at:
+ to the Adapter & Driver ID Guide that can be located at:
<http://support.intel.com>
To compile this driver as a module, choose M here. The module
will be called ixgbe.
-config IXGBE_VXLAN
- bool "Virtual eXtensible Local Area Network Support"
- default n
- depends on IXGBE && VXLAN && !(IXGBE=y && VXLAN=m)
- ---help---
- This allows one to create VXLAN virtual interfaces that provide
- Layer 2 Networks over Layer 3 Networks. VXLAN is often used
- to tunnel virtual network infrastructure in virtualized environments.
- Say Y here if you want to use Virtual eXtensible Local Area Network
- (VXLAN) in the driver.
-
config IXGBE_HWMON
bool "Intel(R) 10GbE PCI Express adapters HWMON support"
default y
@@ -243,12 +200,7 @@ config IXGBEVF
---help---
This driver supports Intel(R) PCI Express virtual functions for the
Intel(R) ixgbe driver. For more information on how to identify your
- adapter, go to the Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/sb/CS-008441.htm>
-
- For general information and support, go to the Intel support
- website at:
+ adapter, go to the Adapter & Driver ID Guide that can be located at:
<http://support.intel.com>
@@ -266,39 +218,13 @@ config I40E
---help---
This driver supports Intel(R) Ethernet Controller XL710 Family of
devices. For more information on how to identify your adapter, go
- to the Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/adapter/pro100/21397.htm>
-
- For general information and support, go to the Intel support
- website at:
+ to the Adapter & Driver ID Guide that can be located at:
<http://support.intel.com>
To compile this driver as a module, choose M here. The module
will be called i40e.
-config I40E_VXLAN
- bool "Virtual eXtensible Local Area Network Support"
- default n
- depends on I40E && VXLAN && !(I40E=y && VXLAN=m)
- ---help---
- This allows one to create VXLAN virtual interfaces that provide
- Layer 2 Networks over Layer 3 Networks. VXLAN is often used
- to tunnel virtual network infrastructure in virtualized environments.
- Say Y here if you want to use Virtual eXtensible Local Area Network
- (VXLAN) in the driver.
-
-config I40E_GENEVE
- bool "Generic Network Virtualization Encapsulation (GENEVE) Support"
- depends on I40E && GENEVE && !(I40E=y && GENEVE=m)
- default n
- ---help---
- This allows one to create GENEVE virtual interfaces that provide
- Layer 2 Networks over Layer 3 Networks. GENEVE is often used
- to tunnel virtual network infrastructure in virtualized environments.
- Say Y here if you want to use GENEVE in the driver.
-
config I40E_DCB
bool "Data Center Bridging (DCB) Support"
default n
@@ -326,12 +252,7 @@ config I40EVF
---help---
This driver supports Intel(R) XL710 and X710 virtual functions.
For more information on how to identify your adapter, go to the
- Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/sb/CS-008441.htm>
-
- For general information and support, go to the Intel support
- website at:
+ Adapter & Driver ID Guide that can be located at:
<http://support.intel.com>
@@ -347,27 +268,11 @@ config FM10K
---help---
This driver supports Intel(R) FM10000 Ethernet Switch Host
Interface. For more information on how to identify your adapter,
- go to the Adapter & Driver ID Guide at:
-
- <http://support.intel.com/support/network/sb/CS-008441.htm>
-
- For general information and support, go to the Intel support
- website at:
+ go to the Adapter & Driver ID Guide that can be located at:
<http://support.intel.com>
To compile this driver as a module, choose M here. The module
	  will be called fm10k. MSI-X interrupt support is required.
-config FM10K_VXLAN
- bool "Virtual eXtensible Local Area Network Support"
- default n
- depends on FM10K && VXLAN && !(FM10K=y && VXLAN=m)
- ---help---
- This allows one to create VXLAN virtual interfaces that provide
- Layer 2 Networks over Layer 3 Networks. VXLAN is often used
- to tunnel virtual network infrastructure in virtualized environments.
- Say Y here if you want to use Virtual eXtensible Local Area Network
- (VXLAN) in the driver.
-
endif # NET_VENDOR_INTEL
diff --git a/drivers/net/ethernet/intel/e1000/e1000.h b/drivers/net/ethernet/intel/e1000/e1000.h
index 98fe5a2cd6e3..d7bdea79e9fa 100644
--- a/drivers/net/ethernet/intel/e1000/e1000.h
+++ b/drivers/net/ethernet/intel/e1000/e1000.h
@@ -358,6 +358,8 @@ struct net_device *e1000_get_hw_dev(struct e1000_hw *hw);
extern char e1000_driver_name[];
extern const char e1000_driver_version[];
+int e1000_open(struct net_device *netdev);
+int e1000_close(struct net_device *netdev);
int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 83e557c7f279..975eeb885ca2 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1553,7 +1553,7 @@ static void e1000_diag_test(struct net_device *netdev,
if (if_running)
/* indicate we're in test mode */
- dev_close(netdev);
+ e1000_close(netdev);
else
e1000_reset(adapter);
@@ -1582,7 +1582,7 @@ static void e1000_diag_test(struct net_device *netdev,
e1000_reset(adapter);
clear_bit(__E1000_TESTING, &adapter->flags);
if (if_running)
- dev_open(netdev);
+ e1000_open(netdev);
} else {
e_info(hw, "online testing starting\n");
/* Online tests */
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index ae90d4f12b70..f42129d09e2c 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -114,8 +114,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static void e1000_remove(struct pci_dev *pdev);
static int e1000_alloc_queues(struct e1000_adapter *adapter);
static int e1000_sw_init(struct e1000_adapter *adapter);
-static int e1000_open(struct net_device *netdev);
-static int e1000_close(struct net_device *netdev);
+int e1000_open(struct net_device *netdev);
+int e1000_close(struct net_device *netdev);
static void e1000_configure_tx(struct e1000_adapter *adapter);
static void e1000_configure_rx(struct e1000_adapter *adapter);
static void e1000_setup_rctl(struct e1000_adapter *adapter);
@@ -1360,7 +1360,7 @@ static int e1000_alloc_queues(struct e1000_adapter *adapter)
* handler is registered with the OS, the watchdog task is started,
* and the stack is notified that the interface is ready.
**/
-static int e1000_open(struct net_device *netdev)
+int e1000_open(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -1437,7 +1437,7 @@ err_setup_tx:
* needs to be disabled. A global MAC reset is issued to stop the
* hardware, and all transmit and receive resources are freed.
**/
-static int e1000_close(struct net_device *netdev)
+int e1000_close(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index 2af603f3e418..cd391376036c 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -121,7 +121,7 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
/* EEPROM access above 16k is unsupported */
if (size > 14)
size = 14;
- nvm->word_size = 1 << size;
+ nvm->word_size = BIT(size);
return 0;
}
@@ -845,27 +845,27 @@ static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw)
/* Transmit Descriptor Control 0 */
reg = er32(TXDCTL(0));
- reg |= (1 << 22);
+ reg |= BIT(22);
ew32(TXDCTL(0), reg);
/* Transmit Descriptor Control 1 */
reg = er32(TXDCTL(1));
- reg |= (1 << 22);
+ reg |= BIT(22);
ew32(TXDCTL(1), reg);
/* Transmit Arbitration Control 0 */
reg = er32(TARC(0));
reg &= ~(0xF << 27); /* 30:27 */
if (hw->phy.media_type != e1000_media_type_copper)
- reg &= ~(1 << 20);
+ reg &= ~BIT(20);
ew32(TARC(0), reg);
/* Transmit Arbitration Control 1 */
reg = er32(TARC(1));
if (er32(TCTL) & E1000_TCTL_MULR)
- reg &= ~(1 << 28);
+ reg &= ~BIT(28);
else
- reg |= (1 << 28);
+ reg |= BIT(28);
ew32(TARC(1), reg);
/* Disable IPv6 extension header parsing because some malformed
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 5f7016442ec4..6b03c8553e59 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -185,7 +185,7 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
/* EEPROM access above 16k is unsupported */
if (size > 14)
size = 14;
- nvm->word_size = 1 << size;
+ nvm->word_size = BIT(size);
break;
}
@@ -1163,12 +1163,12 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
/* Transmit Descriptor Control 0 */
reg = er32(TXDCTL(0));
- reg |= (1 << 22);
+ reg |= BIT(22);
ew32(TXDCTL(0), reg);
/* Transmit Descriptor Control 1 */
reg = er32(TXDCTL(1));
- reg |= (1 << 22);
+ reg |= BIT(22);
ew32(TXDCTL(1), reg);
/* Transmit Arbitration Control 0 */
@@ -1177,11 +1177,11 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
switch (hw->mac.type) {
case e1000_82571:
case e1000_82572:
- reg |= (1 << 23) | (1 << 24) | (1 << 25) | (1 << 26);
+ reg |= BIT(23) | BIT(24) | BIT(25) | BIT(26);
break;
case e1000_82574:
case e1000_82583:
- reg |= (1 << 26);
+ reg |= BIT(26);
break;
default:
break;
@@ -1193,12 +1193,12 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
switch (hw->mac.type) {
case e1000_82571:
case e1000_82572:
- reg &= ~((1 << 29) | (1 << 30));
- reg |= (1 << 22) | (1 << 24) | (1 << 25) | (1 << 26);
+ reg &= ~(BIT(29) | BIT(30));
+ reg |= BIT(22) | BIT(24) | BIT(25) | BIT(26);
if (er32(TCTL) & E1000_TCTL_MULR)
- reg &= ~(1 << 28);
+ reg &= ~BIT(28);
else
- reg |= (1 << 28);
+ reg |= BIT(28);
ew32(TARC(1), reg);
break;
default:
@@ -1211,7 +1211,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
case e1000_82574:
case e1000_82583:
reg = er32(CTRL);
- reg &= ~(1 << 29);
+ reg &= ~BIT(29);
ew32(CTRL, reg);
break;
default:
@@ -1224,8 +1224,8 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
case e1000_82574:
case e1000_82583:
reg = er32(CTRL_EXT);
- reg &= ~(1 << 23);
- reg |= (1 << 22);
+ reg &= ~BIT(23);
+ reg |= BIT(22);
ew32(CTRL_EXT, reg);
break;
default:
@@ -1261,7 +1261,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
case e1000_82574:
case e1000_82583:
reg = er32(GCR);
- reg |= (1 << 22);
+ reg |= BIT(22);
ew32(GCR, reg);
/* Workaround for hardware errata.
@@ -1308,8 +1308,8 @@ static void e1000_clear_vfta_82571(struct e1000_hw *hw)
E1000_VFTA_ENTRY_SHIFT) &
E1000_VFTA_ENTRY_MASK;
vfta_bit_in_reg =
- 1 << (hw->mng_cookie.vlan_id &
- E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
+ BIT(hw->mng_cookie.vlan_id &
+ E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
}
break;
default:
@@ -2032,7 +2032,8 @@ const struct e1000_info e1000_82574_info = {
| FLAG2_DISABLE_ASPM_L0S
| FLAG2_DISABLE_ASPM_L1
| FLAG2_NO_DISABLE_RX
- | FLAG2_DMA_BURST,
+ | FLAG2_DMA_BURST
+ | FLAG2_CHECK_SYSTIM_OVERFLOW,
.pba = 32,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
@@ -2053,7 +2054,8 @@ const struct e1000_info e1000_82583_info = {
| FLAG_HAS_CTRLEXT_ON_LOAD,
.flags2 = FLAG2_DISABLE_ASPM_L0S
| FLAG2_DISABLE_ASPM_L1
- | FLAG2_NO_DISABLE_RX,
+ | FLAG2_NO_DISABLE_RX
+ | FLAG2_CHECK_SYSTIM_OVERFLOW,
.pba = 32,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 1dc293bad87b..879cca47b021 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -109,18 +109,18 @@ struct e1000_info;
#define E1000_TXDCTL_DMA_BURST_ENABLE \
(E1000_TXDCTL_GRAN | /* set descriptor granularity */ \
E1000_TXDCTL_COUNT_DESC | \
- (1 << 16) | /* wthresh must be +1 more than desired */\
- (1 << 8) | /* hthresh */ \
- 0x1f) /* pthresh */
+ (1u << 16) | /* wthresh must be +1 more than desired */\
+ (1u << 8) | /* hthresh */ \
+ 0x1f) /* pthresh */
#define E1000_RXDCTL_DMA_BURST_ENABLE \
(0x01000000 | /* set descriptor granularity */ \
- (4 << 16) | /* set writeback threshold */ \
- (4 << 8) | /* set prefetch threshold */ \
+ (4u << 16) | /* set writeback threshold */ \
+ (4u << 8) | /* set prefetch threshold */ \
0x20) /* set hthresh */
-#define E1000_TIDV_FPD (1 << 31)
-#define E1000_RDTR_FPD (1 << 31)
+#define E1000_TIDV_FPD BIT(31)
+#define E1000_RDTR_FPD BIT(31)
enum e1000_boards {
board_82571,
@@ -347,6 +347,7 @@ struct e1000_adapter {
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_clock_info;
struct pm_qos_request pm_qos_req;
+ s32 ptp_delta;
u16 eee_advert;
};
@@ -404,53 +405,54 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
#define E1000_82574_SYSTIM_EPSILON (1ULL << 35ULL)
/* hardware capability, feature, and workaround flags */
-#define FLAG_HAS_AMT (1 << 0)
-#define FLAG_HAS_FLASH (1 << 1)
-#define FLAG_HAS_HW_VLAN_FILTER (1 << 2)
-#define FLAG_HAS_WOL (1 << 3)
-/* reserved bit4 */
-#define FLAG_HAS_CTRLEXT_ON_LOAD (1 << 5)
-#define FLAG_HAS_SWSM_ON_LOAD (1 << 6)
-#define FLAG_HAS_JUMBO_FRAMES (1 << 7)
-#define FLAG_READ_ONLY_NVM (1 << 8)
-#define FLAG_IS_ICH (1 << 9)
-#define FLAG_HAS_MSIX (1 << 10)
-#define FLAG_HAS_SMART_POWER_DOWN (1 << 11)
-#define FLAG_IS_QUAD_PORT_A (1 << 12)
-#define FLAG_IS_QUAD_PORT (1 << 13)
-#define FLAG_HAS_HW_TIMESTAMP (1 << 14)
-#define FLAG_APME_IN_WUC (1 << 15)
-#define FLAG_APME_IN_CTRL3 (1 << 16)
-#define FLAG_APME_CHECK_PORT_B (1 << 17)
-#define FLAG_DISABLE_FC_PAUSE_TIME (1 << 18)
-#define FLAG_NO_WAKE_UCAST (1 << 19)
-#define FLAG_MNG_PT_ENABLED (1 << 20)
-#define FLAG_RESET_OVERWRITES_LAA (1 << 21)
-#define FLAG_TARC_SPEED_MODE_BIT (1 << 22)
-#define FLAG_TARC_SET_BIT_ZERO (1 << 23)
-#define FLAG_RX_NEEDS_RESTART (1 << 24)
-#define FLAG_LSC_GIG_SPEED_DROP (1 << 25)
-#define FLAG_SMART_POWER_DOWN (1 << 26)
-#define FLAG_MSI_ENABLED (1 << 27)
-/* reserved (1 << 28) */
-#define FLAG_TSO_FORCE (1 << 29)
-#define FLAG_RESTART_NOW (1 << 30)
-#define FLAG_MSI_TEST_FAILED (1 << 31)
-
-#define FLAG2_CRC_STRIPPING (1 << 0)
-#define FLAG2_HAS_PHY_WAKEUP (1 << 1)
-#define FLAG2_IS_DISCARDING (1 << 2)
-#define FLAG2_DISABLE_ASPM_L1 (1 << 3)
-#define FLAG2_HAS_PHY_STATS (1 << 4)
-#define FLAG2_HAS_EEE (1 << 5)
-#define FLAG2_DMA_BURST (1 << 6)
-#define FLAG2_DISABLE_ASPM_L0S (1 << 7)
-#define FLAG2_DISABLE_AIM (1 << 8)
-#define FLAG2_CHECK_PHY_HANG (1 << 9)
-#define FLAG2_NO_DISABLE_RX (1 << 10)
-#define FLAG2_PCIM2PCI_ARBITER_WA (1 << 11)
-#define FLAG2_DFLT_CRC_STRIPPING (1 << 12)
-#define FLAG2_CHECK_RX_HWTSTAMP (1 << 13)
+#define FLAG_HAS_AMT BIT(0)
+#define FLAG_HAS_FLASH BIT(1)
+#define FLAG_HAS_HW_VLAN_FILTER BIT(2)
+#define FLAG_HAS_WOL BIT(3)
+/* reserved BIT(4) */
+#define FLAG_HAS_CTRLEXT_ON_LOAD BIT(5)
+#define FLAG_HAS_SWSM_ON_LOAD BIT(6)
+#define FLAG_HAS_JUMBO_FRAMES BIT(7)
+#define FLAG_READ_ONLY_NVM BIT(8)
+#define FLAG_IS_ICH BIT(9)
+#define FLAG_HAS_MSIX BIT(10)
+#define FLAG_HAS_SMART_POWER_DOWN BIT(11)
+#define FLAG_IS_QUAD_PORT_A BIT(12)
+#define FLAG_IS_QUAD_PORT BIT(13)
+#define FLAG_HAS_HW_TIMESTAMP BIT(14)
+#define FLAG_APME_IN_WUC BIT(15)
+#define FLAG_APME_IN_CTRL3 BIT(16)
+#define FLAG_APME_CHECK_PORT_B BIT(17)
+#define FLAG_DISABLE_FC_PAUSE_TIME BIT(18)
+#define FLAG_NO_WAKE_UCAST BIT(19)
+#define FLAG_MNG_PT_ENABLED BIT(20)
+#define FLAG_RESET_OVERWRITES_LAA BIT(21)
+#define FLAG_TARC_SPEED_MODE_BIT BIT(22)
+#define FLAG_TARC_SET_BIT_ZERO BIT(23)
+#define FLAG_RX_NEEDS_RESTART BIT(24)
+#define FLAG_LSC_GIG_SPEED_DROP BIT(25)
+#define FLAG_SMART_POWER_DOWN BIT(26)
+#define FLAG_MSI_ENABLED BIT(27)
+/* reserved BIT(28) */
+#define FLAG_TSO_FORCE BIT(29)
+#define FLAG_RESTART_NOW BIT(30)
+#define FLAG_MSI_TEST_FAILED BIT(31)
+
+#define FLAG2_CRC_STRIPPING BIT(0)
+#define FLAG2_HAS_PHY_WAKEUP BIT(1)
+#define FLAG2_IS_DISCARDING BIT(2)
+#define FLAG2_DISABLE_ASPM_L1 BIT(3)
+#define FLAG2_HAS_PHY_STATS BIT(4)
+#define FLAG2_HAS_EEE BIT(5)
+#define FLAG2_DMA_BURST BIT(6)
+#define FLAG2_DISABLE_ASPM_L0S BIT(7)
+#define FLAG2_DISABLE_AIM BIT(8)
+#define FLAG2_CHECK_PHY_HANG BIT(9)
+#define FLAG2_NO_DISABLE_RX BIT(10)
+#define FLAG2_PCIM2PCI_ARBITER_WA BIT(11)
+#define FLAG2_DFLT_CRC_STRIPPING BIT(12)
+#define FLAG2_CHECK_RX_HWTSTAMP BIT(13)
+#define FLAG2_CHECK_SYSTIM_OVERFLOW BIT(14)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
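The sweep from open-coded shifts to BIT() matters most for bit 31: "1 << 31" shifts a signed int into its sign bit, which is undefined behavior, while BIT(n) expands to an unsigned-long shift (include/linux/bitops.h defines BIT(nr) as (1UL << (nr))). A minimal illustration:

	#include <linux/bitops.h>

	static u32 example_flags(void)
	{
		u32 flags = 0;

		flags |= BIT(31);	/* well-defined: 1UL << 31      */
		/* flags |= 1 << 31;	   undefined: signed overflow  */
		flags &= ~BIT(28);	/* complement of unsigned value */

		return flags;
	}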
@@ -480,6 +482,8 @@ extern const char e1000e_driver_version[];
void e1000e_check_options(struct e1000_adapter *adapter);
void e1000e_set_ethtool_ops(struct net_device *netdev);
+int e1000e_open(struct net_device *netdev);
+int e1000e_close(struct net_device *netdev);
void e1000e_up(struct e1000_adapter *adapter);
void e1000e_down(struct e1000_adapter *adapter, bool reset);
void e1000e_reinit_locked(struct e1000_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 6cab1f30d41e..7aff68a4a4df 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -201,6 +201,9 @@ static int e1000_get_settings(struct net_device *netdev,
else
ecmd->eth_tp_mdix_ctrl = hw->phy.mdix;
+ if (hw->phy.media_type != e1000_media_type_copper)
+ ecmd->eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+
return 0;
}
@@ -236,8 +239,13 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
mac->forced_speed_duplex = ADVERTISE_100_FULL;
break;
case SPEED_1000 + DUPLEX_FULL:
- mac->autoneg = 1;
- adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
+ if (adapter->hw.phy.media_type == e1000_media_type_copper) {
+ mac->autoneg = 1;
+ adapter->hw.phy.autoneg_advertised =
+ ADVERTISE_1000_FULL;
+ } else {
+ mac->forced_speed_duplex = ADVERTISE_1000_FULL;
+ }
break;
case SPEED_1000 + DUPLEX_HALF: /* not supported */
default:
@@ -439,8 +447,9 @@ static void e1000_get_regs(struct net_device *netdev,
memset(p, 0, E1000_REGS_LEN * sizeof(u32));
- regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
- adapter->pdev->device;
+ regs->version = (1u << 24) |
+ (adapter->pdev->revision << 16) |
+ adapter->pdev->device;
regs_buff[0] = er32(CTRL);
regs_buff[1] = er32(STATUS);
@@ -895,7 +904,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
case e1000_pch2lan:
case e1000_pch_lpt:
case e1000_pch_spt:
- mask |= (1 << 18);
+ mask |= BIT(18);
break;
default:
break;
@@ -914,9 +923,9 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
/* SHRAH[9] different than the others */
if (i == 10)
- mask |= (1 << 30);
+ mask |= BIT(30);
else
- mask &= ~(1 << 30);
+ mask &= ~BIT(30);
}
if (mac->type == e1000_pch2lan) {
/* SHRAH[0,1,2] different than previous */
@@ -924,7 +933,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
mask &= 0xFFF4FFFF;
/* SHRAH[3] different than SHRAH[0,1,2] */
if (i == 4)
- mask |= (1 << 30);
+ mask |= BIT(30);
/* RAR[1-6] owned by management engine - skipping */
if (i > 0)
i += 6;
@@ -1019,7 +1028,7 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
/* Test each interrupt */
for (i = 0; i < 10; i++) {
/* Interrupt to test */
- mask = 1 << i;
+ mask = BIT(i);
if (adapter->flags & FLAG_IS_ICH) {
switch (mask) {
@@ -1387,7 +1396,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
case e1000_phy_82579:
/* Disable PHY energy detect power down */
e1e_rphy(hw, PHY_REG(0, 21), &phy_reg);
- e1e_wphy(hw, PHY_REG(0, 21), phy_reg & ~(1 << 3));
+ e1e_wphy(hw, PHY_REG(0, 21), phy_reg & ~BIT(3));
/* Disable full chip energy detect */
e1e_rphy(hw, PHY_REG(776, 18), &phy_reg);
e1e_wphy(hw, PHY_REG(776, 18), phy_reg | 1);
@@ -1453,7 +1462,7 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
/* disable autoneg */
ctrl = er32(TXCW);
- ctrl &= ~(1 << 31);
+ ctrl &= ~BIT(31);
ew32(TXCW, ctrl);
link = (er32(STATUS) & E1000_STATUS_LU);
@@ -1816,7 +1825,7 @@ static void e1000_diag_test(struct net_device *netdev,
if (if_running)
/* indicate we're in test mode */
- dev_close(netdev);
+ e1000e_close(netdev);
if (e1000_reg_test(adapter, &data[0]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1849,7 +1858,7 @@ static void e1000_diag_test(struct net_device *netdev,
clear_bit(__E1000_TESTING, &adapter->state);
if (if_running)
- dev_open(netdev);
+ e1000e_open(netdev);
} else {
/* Online tests */
@@ -2283,19 +2292,19 @@ static int e1000e_get_ts_info(struct net_device *netdev,
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE);
- info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
-
- info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
- (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_ALL));
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+
+ info->rx_filters = (BIT(HWTSTAMP_FILTER_NONE) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_ALL));
if (adapter->ptp_clock)
info->phc_index = ptp_clock_index(adapter->ptp_clock);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index c0f4887ea44d..f3aaca743ea3 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1048,7 +1048,7 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
while (value > PCI_LTR_VALUE_MASK) {
scale++;
- value = DIV_ROUND_UP(value, (1 << 5));
+ value = DIV_ROUND_UP(value, BIT(5));
}
if (scale > E1000_LTRV_SCALE_MAX) {
e_dbg("Invalid LTR latency scale %d\n", scale);
@@ -1573,7 +1573,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD)
- phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
+ phy_reg |= BIT(HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
break;
@@ -2044,9 +2044,9 @@ static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
/* Restore SMBus frequency */
if (freq--) {
phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
- phy_data |= (freq & (1 << 0)) <<
+ phy_data |= (freq & BIT(0)) <<
HV_SMB_ADDR_FREQ_LOW_SHIFT;
- phy_data |= (freq & (1 << 1)) <<
+ phy_data |= (freq & BIT(1)) <<
(HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
} else {
e_dbg("Unsupported SMB frequency in PHY\n");
@@ -2530,7 +2530,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
/* disable Rx path while enabling/disabling workaround */
e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
- ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | (1 << 14));
+ ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | BIT(14));
if (ret_val)
return ret_val;
@@ -2561,7 +2561,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
/* Enable jumbo frame workaround in the MAC */
mac_reg = er32(FFLT_DBG);
- mac_reg &= ~(1 << 14);
+ mac_reg &= ~BIT(14);
mac_reg |= (7 << 15);
ew32(FFLT_DBG, mac_reg);
@@ -2576,7 +2576,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
return ret_val;
ret_val = e1000e_write_kmrn_reg(hw,
E1000_KMRNCTRLSTA_CTRL_OFFSET,
- data | (1 << 0));
+ data | BIT(0));
if (ret_val)
return ret_val;
ret_val = e1000e_read_kmrn_reg(hw,
@@ -2600,7 +2600,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
if (ret_val)
return ret_val;
e1e_rphy(hw, PHY_REG(769, 16), &data);
- data &= ~(1 << 13);
+ data &= ~BIT(13);
ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
if (ret_val)
return ret_val;
@@ -2614,7 +2614,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
if (ret_val)
return ret_val;
e1e_rphy(hw, HV_PM_CTRL, &data);
- ret_val = e1e_wphy(hw, HV_PM_CTRL, data | (1 << 10));
+ ret_val = e1e_wphy(hw, HV_PM_CTRL, data | BIT(10));
if (ret_val)
return ret_val;
} else {
@@ -2634,7 +2634,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
return ret_val;
ret_val = e1000e_write_kmrn_reg(hw,
E1000_KMRNCTRLSTA_CTRL_OFFSET,
- data & ~(1 << 0));
+ data & ~BIT(0));
if (ret_val)
return ret_val;
ret_val = e1000e_read_kmrn_reg(hw,
@@ -2657,7 +2657,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
if (ret_val)
return ret_val;
e1e_rphy(hw, PHY_REG(769, 16), &data);
- data |= (1 << 13);
+ data |= BIT(13);
ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
if (ret_val)
return ret_val;
@@ -2671,13 +2671,13 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
if (ret_val)
return ret_val;
e1e_rphy(hw, HV_PM_CTRL, &data);
- ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~(1 << 10));
+ ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~BIT(10));
if (ret_val)
return ret_val;
}
/* re-enable Rx path after enabling/disabling workaround */
- return e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14));
+ return e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~BIT(14));
}
/**
@@ -4841,7 +4841,7 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
/* Extended Device Control */
reg = er32(CTRL_EXT);
- reg |= (1 << 22);
+ reg |= BIT(22);
/* Enable PHY low-power state when MAC is at D3 w/o WoL */
if (hw->mac.type >= e1000_pchlan)
reg |= E1000_CTRL_EXT_PHYPDEN;
@@ -4849,34 +4849,34 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
/* Transmit Descriptor Control 0 */
reg = er32(TXDCTL(0));
- reg |= (1 << 22);
+ reg |= BIT(22);
ew32(TXDCTL(0), reg);
/* Transmit Descriptor Control 1 */
reg = er32(TXDCTL(1));
- reg |= (1 << 22);
+ reg |= BIT(22);
ew32(TXDCTL(1), reg);
/* Transmit Arbitration Control 0 */
reg = er32(TARC(0));
if (hw->mac.type == e1000_ich8lan)
- reg |= (1 << 28) | (1 << 29);
- reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
+ reg |= BIT(28) | BIT(29);
+ reg |= BIT(23) | BIT(24) | BIT(26) | BIT(27);
ew32(TARC(0), reg);
/* Transmit Arbitration Control 1 */
reg = er32(TARC(1));
if (er32(TCTL) & E1000_TCTL_MULR)
- reg &= ~(1 << 28);
+ reg &= ~BIT(28);
else
- reg |= (1 << 28);
- reg |= (1 << 24) | (1 << 26) | (1 << 30);
+ reg |= BIT(28);
+ reg |= BIT(24) | BIT(26) | BIT(30);
ew32(TARC(1), reg);
/* Device Status */
if (hw->mac.type == e1000_ich8lan) {
reg = er32(STATUS);
- reg &= ~(1 << 31);
+ reg &= ~BIT(31);
ew32(STATUS, reg);
}
@@ -5885,7 +5885,8 @@ const struct e1000_info e1000_pch_lpt_info = {
| FLAG_HAS_JUMBO_FRAMES
| FLAG_APME_IN_WUC,
.flags2 = FLAG2_HAS_PHY_STATS
- | FLAG2_HAS_EEE,
+ | FLAG2_HAS_EEE
+ | FLAG2_CHECK_SYSTIM_OVERFLOW,
.pba = 26,
.max_hw_frame_size = 9022,
.get_variants = e1000_get_variants_ich8lan,
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 2311f6003f58..67163ca898ba 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -73,10 +73,10 @@
(ID_LED_OFF1_ON2 << 4) | \
(ID_LED_DEF1_DEF2))
-#define E1000_ICH_NVM_SIG_WORD 0x13
-#define E1000_ICH_NVM_SIG_MASK 0xC000
-#define E1000_ICH_NVM_VALID_SIG_MASK 0xC0
-#define E1000_ICH_NVM_SIG_VALUE 0x80
+#define E1000_ICH_NVM_SIG_WORD 0x13u
+#define E1000_ICH_NVM_SIG_MASK 0xC000u
+#define E1000_ICH_NVM_VALID_SIG_MASK 0xC0u
+#define E1000_ICH_NVM_SIG_VALUE 0x80u
#define E1000_ICH8_LAN_INIT_TIMEOUT 1500
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index e59d7c283cd4..b322011ec282 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -346,7 +346,7 @@ void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
hash_bit = hash_value & 0x1F;
- hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
+ hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit);
mc_addr_list += (ETH_ALEN);
}
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 9b4ec13d9161..7017281ba2dc 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -242,7 +242,7 @@ static void e1000e_dump(struct e1000_adapter *adapter)
dev_info(&adapter->pdev->dev, "Net device Info\n");
pr_info("Device Name state trans_start last_rx\n");
pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
- netdev->state, netdev->trans_start, netdev->last_rx);
+ netdev->state, dev_trans_start(netdev), netdev->last_rx);
}
/* Print Registers */
@@ -317,8 +317,8 @@ static void e1000e_dump(struct e1000_adapter *adapter)
else
next_desc = "";
pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p%s\n",
- (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
- ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')),
+ (!(le64_to_cpu(u0->b) & BIT(29)) ? 'l' :
+ ((le64_to_cpu(u0->b) & BIT(20)) ? 'd' : 'c')),
i,
(unsigned long long)le64_to_cpu(u0->a),
(unsigned long long)le64_to_cpu(u0->b),
@@ -2018,7 +2018,7 @@ static void e1000_configure_msix(struct e1000_adapter *adapter)
adapter->eiac_mask |= E1000_IMS_OTHER;
/* Cause Tx interrupts on every write back */
- ivar |= (1 << 31);
+ ivar |= BIT(31);
ew32(IVAR, ivar);
@@ -2709,7 +2709,7 @@ static int e1000_vlan_rx_add_vid(struct net_device *netdev,
if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
index = (vid >> 5) & 0x7F;
vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
- vfta |= (1 << (vid & 0x1F));
+ vfta |= BIT((vid & 0x1F));
hw->mac.ops.write_vfta(hw, index, vfta);
}
@@ -2737,7 +2737,7 @@ static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
index = (vid >> 5) & 0x7F;
vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
- vfta &= ~(1 << (vid & 0x1F));
+ vfta &= ~BIT((vid & 0x1F));
hw->mac.ops.write_vfta(hw, index, vfta);
}
@@ -2789,7 +2789,7 @@ static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter)
}
/**
- * e1000e_vlan_strip_enable - helper to disable HW VLAN stripping
+ * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping
* @adapter: board private structure to initialize
**/
static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)
@@ -2878,7 +2878,7 @@ static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
/* Enable this decision filter in MANC2H */
if (mdef)
- manc2h |= (1 << i);
+ manc2h |= BIT(i);
j |= mdef;
}
@@ -2891,7 +2891,7 @@ static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
if (er32(MDEF(i)) == 0) {
ew32(MDEF(i), (E1000_MDEF_PORT_623 |
E1000_MDEF_PORT_664));
- manc2h |= (1 << 1);
+ manc2h |= BIT(1);
j++;
break;
}
@@ -2971,7 +2971,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
/* set the speed mode bit, we'll clear it if we're not at
* gigabit link later
*/
-#define SPEED_MODE_BIT (1 << 21)
+#define SPEED_MODE_BIT BIT(21)
tarc |= SPEED_MODE_BIT;
ew32(TARC(0), tarc);
}
@@ -3071,12 +3071,12 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
phy_data &= 0xfff8;
- phy_data |= (1 << 2);
+ phy_data |= BIT(2);
e1e_wphy(hw, PHY_REG(770, 26), phy_data);
e1e_rphy(hw, 22, &phy_data);
phy_data &= 0x0fff;
- phy_data |= (1 << 14);
+ phy_data |= BIT(14);
e1e_wphy(hw, 0x10, 0x2823);
e1e_wphy(hw, 0x11, 0x0003);
e1e_wphy(hw, 22, phy_data);
@@ -3368,12 +3368,12 @@ static int e1000e_write_uc_addr_list(struct net_device *netdev)
* combining
*/
netdev_for_each_uc_addr(ha, netdev) {
- int rval;
+ int ret_val;
if (!rar_entries)
break;
- rval = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
- if (rval < 0)
+ ret_val = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
+ if (ret_val < 0)
return -ENOMEM;
count++;
}
@@ -3503,8 +3503,8 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
!(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) {
u32 fextnvm7 = er32(FEXTNVM7);
- if (!(fextnvm7 & (1 << 0))) {
- ew32(FEXTNVM7, fextnvm7 | (1 << 0));
+ if (!(fextnvm7 & BIT(0))) {
+ ew32(FEXTNVM7, fextnvm7 | BIT(0));
e1e_flush();
}
}
@@ -3580,7 +3580,6 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
bool is_l4 = false;
bool is_l2 = false;
u32 regval;
- s32 ret_val;
if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
return -EINVAL;
@@ -3719,16 +3718,6 @@ static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
er32(RXSTMPH);
er32(TXSTMPH);
- /* Get and set the System Time Register SYSTIM base frequency */
- ret_val = e1000e_get_base_timinca(adapter, &regval);
- if (ret_val)
- return ret_val;
- ew32(TIMINCA, regval);
-
- /* reset the ns time counter */
- timecounter_init(&adapter->tc, &adapter->cc,
- ktime_to_ns(ktime_get_real()));
-
return 0;
}
@@ -3839,7 +3828,7 @@ static void e1000_flush_rx_ring(struct e1000_adapter *adapter)
/* update thresholds: prefetch threshold to 31, host threshold to 1
* and make sure the granularity is "descriptors" and not "cache lines"
*/
- rxdctl |= (0x1F | (1 << 8) | E1000_RXDCTL_THRESH_UNIT_DESC);
+ rxdctl |= (0x1F | BIT(8) | E1000_RXDCTL_THRESH_UNIT_DESC);
ew32(RXDCTL(0), rxdctl);
/* momentarily enable the RX ring for the changes to take effect */
@@ -3885,6 +3874,53 @@ static void e1000_flush_desc_rings(struct e1000_adapter *adapter)
}
/**
+ * e1000e_systim_reset - reset the timesync registers after a hardware reset
+ * @adapter: board private structure
+ *
+ * When the MAC is reset, all hardware bits for timesync will be reset to the
+ * default values. This function restores the settings that were last in place.
+ * Since the clock SYSTIME registers are reset, we will simply restore the
+ * cyclecounter to the kernel real clock time.
+ **/
+static void e1000e_systim_reset(struct e1000_adapter *adapter)
+{
+ struct ptp_clock_info *info = &adapter->ptp_clock_info;
+ struct e1000_hw *hw = &adapter->hw;
+ unsigned long flags;
+ u32 timinca;
+ s32 ret_val;
+
+ if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
+ return;
+
+ if (info->adjfreq) {
+ /* restore the previous ptp frequency delta */
+ ret_val = info->adjfreq(info, adapter->ptp_delta);
+ } else {
+ /* set the default base frequency if no adjustment possible */
+ ret_val = e1000e_get_base_timinca(adapter, &timinca);
+ if (!ret_val)
+ ew32(TIMINCA, timinca);
+ }
+
+ if (ret_val) {
+ dev_warn(&adapter->pdev->dev,
+ "Failed to restore TIMINCA clock rate delta: %d\n",
+ ret_val);
+ return;
+ }
+
+ /* reset the systim ns time counter */
+ spin_lock_irqsave(&adapter->systim_lock, flags);
+ timecounter_init(&adapter->tc, &adapter->cc,
+ ktime_to_ns(ktime_get_real()));
+ spin_unlock_irqrestore(&adapter->systim_lock, flags);
+
+ /* restore the previous hwtstamp configuration settings */
+ e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config);
+}
+
+/**
* e1000e_reset - bring the hardware into a known good state
*
* This function boots the hardware and enables some settings that
@@ -4063,8 +4099,8 @@ void e1000e_reset(struct e1000_adapter *adapter)
e1000e_reset_adaptive(hw);
- /* initialize systim and reset the ns time counter */
- e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config);
+ /* restore systim and hwtstamp settings */
+ e1000e_systim_reset(adapter);
/* Set EEE advertisement as appropriate */
if (adapter->flags2 & FLAG2_HAS_EEE) {
@@ -4267,6 +4303,42 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter)
}
/**
+ * e1000e_sanitize_systim - sanitize raw cycle counter reads
+ * @hw: pointer to the HW structure
+ * @systim: cycle_t value read, sanitized and returned
+ *
+ * Errata for 82574/82583 possible bad bits read from SYSTIMH/L:
+ * check to see that the time is incrementing at a reasonable
+ * rate and is a multiple of incvalue.
+ **/
+static cycle_t e1000e_sanitize_systim(struct e1000_hw *hw, cycle_t systim)
+{
+ u64 time_delta, rem, temp;
+ cycle_t systim_next;
+ u32 incvalue;
+ int i;
+
+ incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
+ for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
+ /* latch SYSTIMH on read of SYSTIML */
+ systim_next = (cycle_t)er32(SYSTIML);
+ systim_next |= (cycle_t)er32(SYSTIMH) << 32;
+
+ time_delta = systim_next - systim;
+ temp = time_delta;
+		/* VMware users have seen incvalue of zero; don't divide by 0 */
+ rem = incvalue ? do_div(temp, incvalue) : (time_delta != 0);
+
+ systim = systim_next;
+
+ if ((time_delta < E1000_82574_SYSTIM_EPSILON) && (rem == 0))
+ break;
+ }
+
+ return systim;
+}
+
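The sanity test accepts a reading only when the step since the previous read is both below the 2^35 epsilon and an exact multiple of incvalue, because a healthy counter only ever advances in whole increments; the corrupted reads described by the 82574/82583 erratum fail one of the two conditions. The check in isolation, as a userspace illustration with invented numbers:

	#include <stdbool.h>
	#include <stdint.h>

	/* Userspace sketch; the driver uses do_div() and
	 * E1000_82574_SYSTIM_EPSILON (1ULL << 35) instead.
	 */
	static bool systim_delta_ok(uint64_t delta, uint64_t incvalue)
	{
		return delta < (1ULL << 35) && (delta % incvalue) == 0;
	}

	/* systim_delta_ok(24000, 24) -> true  (1000 whole increments)
	 * systim_delta_ok(24007, 24) -> false (mid-increment garbage)
	 */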
+/**
* e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
* @cc: cyclecounter structure
**/
@@ -4275,55 +4347,33 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
cc);
struct e1000_hw *hw = &adapter->hw;
- u32 systimel_1, systimel_2, systimeh;
- cycle_t systim, systim_next;
+ u32 systimel, systimeh;
+ cycle_t systim;
/* SYSTIMH latching upon SYSTIML read does not work well.
* This means that if SYSTIML overflows after we read it but before
* we read SYSTIMH, the value of SYSTIMH has been incremented and we
* will experience a huge non linear increment in the systime value
* to fix that we test for overflow and if true, we re-read systime.
*/
- systimel_1 = er32(SYSTIML);
+ systimel = er32(SYSTIML);
systimeh = er32(SYSTIMH);
- systimel_2 = er32(SYSTIML);
- /* Check for overflow. If there was no overflow, use the values */
- if (systimel_1 < systimel_2) {
- systim = (cycle_t)systimel_1;
- systim |= (cycle_t)systimeh << 32;
- } else {
- /* There was an overflow, read again SYSTIMH, and use
- * systimel_2
- */
- systimeh = er32(SYSTIMH);
- systim = (cycle_t)systimel_2;
- systim |= (cycle_t)systimeh << 32;
+	/* Is systimel so large that overflow is possible? */
+ if (systimel >= (u32)0xffffffff - E1000_TIMINCA_INCVALUE_MASK) {
+ u32 systimel_2 = er32(SYSTIML);
+ if (systimel > systimel_2) {
+ /* There was an overflow, read again SYSTIMH, and use
+ * systimel_2
+ */
+ systimeh = er32(SYSTIMH);
+ systimel = systimel_2;
+ }
}
+ systim = (cycle_t)systimel;
+ systim |= (cycle_t)systimeh << 32;
- if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) {
- u64 incvalue, time_delta, rem, temp;
- int i;
+ if (adapter->flags2 & FLAG2_CHECK_SYSTIM_OVERFLOW)
+ systim = e1000e_sanitize_systim(hw, systim);
- /* errata for 82574/82583 possible bad bits read from SYSTIMH/L
- * check to see that the time is incrementing at a reasonable
- * rate and is a multiple of incvalue
- */
- incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
- for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
- /* latch SYSTIMH on read of SYSTIML */
- systim_next = (cycle_t)er32(SYSTIML);
- systim_next |= (cycle_t)er32(SYSTIMH) << 32;
-
- time_delta = systim_next - systim;
- temp = time_delta;
- rem = do_div(temp, incvalue);
-
- systim = systim_next;
-
- if ((time_delta < E1000_82574_SYSTIM_EPSILON) &&
- (rem == 0))
- break;
- }
- }
return systim;
}
@@ -4495,7 +4545,7 @@ static int e1000_test_msi(struct e1000_adapter *adapter)
}
/**
- * e1000_open - Called when a network interface is made active
+ * e1000e_open - Called when a network interface is made active
* @netdev: network interface device structure
*
* Returns 0 on success, negative value on failure
@@ -4506,7 +4556,7 @@ static int e1000_test_msi(struct e1000_adapter *adapter)
* handler is registered with the OS, the watchdog timer is started,
* and the stack is notified that the interface is ready.
**/
-static int e1000_open(struct net_device *netdev)
+int e1000e_open(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -4604,7 +4654,7 @@ err_setup_tx:
}
/**
- * e1000_close - Disables a network interface
+ * e1000e_close - Disables a network interface
* @netdev: network interface device structure
*
* Returns 0, this is not allowed to fail
@@ -4614,7 +4664,7 @@ err_setup_tx:
* needs to be disabled. A global MAC reset is issued to stop the
* hardware, and all transmit and receive resources are freed.
**/
-static int e1000_close(struct net_device *netdev)
+int e1000e_close(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = adapter->pdev;
@@ -6861,7 +6911,7 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter)
ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
le16_to_cpus(&buf);
- if (!ret_val && (!(buf & (1 << 0)))) {
+ if (!ret_val && (!(buf & BIT(0)))) {
/* Deep Smart Power Down (DSPD) */
dev_warn(&adapter->pdev->dev,
"Warning: detected DSPD enabled in EEPROM\n");
@@ -6878,6 +6928,14 @@ static netdev_features_t e1000_fix_features(struct net_device *netdev,
if ((hw->mac.type >= e1000_pch2lan) && (netdev->mtu > ETH_DATA_LEN))
features &= ~NETIF_F_RXFCS;
+ /* Since there is no support for separate Rx/Tx vlan accel
+ * enable/disable make sure Tx flag is always in same state as Rx.
+ */
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ features |= NETIF_F_HW_VLAN_CTAG_TX;
+ else
+ features &= ~NETIF_F_HW_VLAN_CTAG_TX;
+
return features;
}
@@ -6920,8 +6978,8 @@ static int e1000_set_features(struct net_device *netdev,
}
static const struct net_device_ops e1000e_netdev_ops = {
- .ndo_open = e1000_open,
- .ndo_stop = e1000_close,
+ .ndo_open = e1000e_open,
+ .ndo_stop = e1000e_close,
.ndo_start_xmit = e1000_xmit_frame,
.ndo_get_stats64 = e1000e_get_stats64,
.ndo_set_rx_mode = e1000e_set_rx_mode,
@@ -6965,7 +7023,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int bars, i, err, pci_using_dac;
u16 eeprom_data = 0;
u16 eeprom_apme_mask = E1000_EEPROM_APME;
- s32 rval = 0;
+ s32 ret_val = 0;
if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
aspm_disable_flag = PCIE_LINK_STATE_L0S;
@@ -7200,18 +7258,18 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
} else if (adapter->flags & FLAG_APME_IN_CTRL3) {
if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
(adapter->hw.bus.func == 1))
- rval = e1000_read_nvm(&adapter->hw,
+ ret_val = e1000_read_nvm(&adapter->hw,
NVM_INIT_CONTROL3_PORT_B,
1, &eeprom_data);
else
- rval = e1000_read_nvm(&adapter->hw,
+ ret_val = e1000_read_nvm(&adapter->hw,
NVM_INIT_CONTROL3_PORT_A,
1, &eeprom_data);
}
/* fetch WoL from EEPROM */
- if (rval)
- e_dbg("NVM read error getting WoL initial values: %d\n", rval);
+ if (ret_val)
+ e_dbg("NVM read error getting WoL initial values: %d\n", ret_val);
else if (eeprom_data & eeprom_apme_mask)
adapter->eeprom_wol |= E1000_WUFC_MAG;
@@ -7231,13 +7289,16 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
device_wakeup_enable(&pdev->dev);
/* save off EEPROM version number */
- rval = e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
+ ret_val = e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
- if (rval) {
- e_dbg("NVM read error getting EEPROM version: %d\n", rval);
+ if (ret_val) {
+ e_dbg("NVM read error getting EEPROM version: %d\n", ret_val);
adapter->eeprom_vers = 0;
}
+ /* init PTP hardware clock */
+ e1000e_ptp_init(adapter);
+
/* reset the hardware with the new settings */
e1000e_reset(adapter);
@@ -7256,9 +7317,6 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
- /* init PTP hardware clock */
- e1000e_ptp_init(adapter);
-
e1000_print_device_info(adapter);
if (pci_dev_run_wake(pdev))
@@ -7284,8 +7342,7 @@ err_flashmap:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
- pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
pci_disable_device(pdev);
@@ -7352,8 +7409,7 @@ static void e1000_remove(struct pci_dev *pdev)
if ((adapter->hw.flash_address) &&
(adapter->hw.mac.type < e1000_pch_spt))
iounmap(adapter->hw.flash_address);
- pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_release_mem_regions(pdev);
free_netdev(netdev);
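The two hunks above swap the open-coded pci_release_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM)) pair for pci_release_mem_regions(), a PCI core helper that releases every memory BAR claimed by the matching pci_request_mem_regions() call. A minimal sketch of the pairing, with hypothetical names:

static int demo_probe(struct pci_dev *pdev)
{
	int err;

	/* claim all memory BARs under one name */
	err = pci_request_mem_regions(pdev, "demo");
	if (err)
		return err;

	/* ... ioremap BARs, finish device setup ... */
	return 0;
}

static void demo_remove(struct pci_dev *pdev)
{
	/* release the same set of memory BARs */
	pci_release_mem_regions(pdev);
}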
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index 49f205c023bf..2efd80dfd88e 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -67,7 +67,7 @@ static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
u32 eecd = er32(EECD);
u32 mask;
- mask = 0x01 << (count - 1);
+ mask = BIT(count - 1);
if (nvm->type == e1000_nvm_eeprom_spi)
eecd |= E1000_EECD_DO;
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index de13aeacae97..d78d47b41a71 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -2894,11 +2894,11 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
if ((hw->phy.type == e1000_phy_82578) &&
(hw->phy.revision >= 1) &&
(hw->phy.addr == 2) &&
- !(MAX_PHY_REG_ADDRESS & reg) && (data & (1 << 11))) {
+ !(MAX_PHY_REG_ADDRESS & reg) && (data & BIT(11))) {
u16 data2 = 0x7EFF;
ret_val = e1000_access_phy_debug_regs_hv(hw,
- (1 << 6) | 0x3,
+ BIT(6) | 0x3,
&data2, false);
if (ret_val)
goto out;
diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h
index 55bfe473514d..3027f63ee793 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.h
+++ b/drivers/net/ethernet/intel/e1000e/phy.h
@@ -104,9 +104,9 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
#define BM_WUC_DATA_OPCODE 0x12
#define BM_WUC_ENABLE_PAGE BM_PORT_CTRL_PAGE
#define BM_WUC_ENABLE_REG 17
-#define BM_WUC_ENABLE_BIT (1 << 2)
-#define BM_WUC_HOST_WU_BIT (1 << 4)
-#define BM_WUC_ME_WU_BIT (1 << 5)
+#define BM_WUC_ENABLE_BIT BIT(2)
+#define BM_WUC_HOST_WU_BIT BIT(4)
+#define BM_WUC_ME_WU_BIT BIT(5)
#define PHY_UPPER_SHIFT 21
#define BM_PHY_REG(page, reg) \
@@ -124,8 +124,8 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
#define I82578_ADDR_REG 29
#define I82577_ADDR_REG 16
#define I82577_CFG_REG 22
-#define I82577_CFG_ASSERT_CRS_ON_TX (1 << 15)
-#define I82577_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift */
+#define I82577_CFG_ASSERT_CRS_ON_TX BIT(15)
+#define I82577_CFG_ENABLE_DOWNSHIFT (3u << 10) /* auto downshift */
#define I82577_CTRL_REG 23
/* 82577 specific PHY registers */
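Throughout this series, single-bit masks written as 1 << n become BIT(n). The kernel defines the macro (in <linux/bitops.h>) essentially as below; it expands to an unsigned long, which is why one-bit constants convert cleanly while the two-bit downshift field above stays an explicit (3u << 10):

#define BIT(nr)			(1UL << (nr))

/* e.g. the converted wakeup-control bits */
#define DEMO_ENABLE_BIT		BIT(2)	/* == 0x0004 */
#define DEMO_HOST_WU_BIT	BIT(4)	/* == 0x0010 */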
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index e2ff3ef75d5d..2e1b17ad52a3 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -79,6 +79,8 @@ static int e1000e_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
ew32(TIMINCA, timinca);
+ adapter->ptp_delta = delta;
+
spin_unlock_irqrestore(&adapter->systim_lock, flags);
return 0;
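The new adapter->ptp_delta field caches the most recent frequency adjustment requested through the PHC. In the ptp_clock_info API, adjfreq receives delta in parts per billion, with the sign giving the direction; a generic sketch of the scaling pattern such drivers apply to their increment register (not e1000e's exact TIMINCA math) is:

/* scale a nominal clock increment by delta parts per billion */
static u64 demo_scaled_incvalue(u64 incvalue, s32 delta)
{
	u64 adj = div_u64(incvalue * (u64)abs(delta), 1000000000U);

	return delta < 0 ? incvalue - adj : incvalue + adj;
}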
diff --git a/drivers/net/ethernet/intel/fm10k/Makefile b/drivers/net/ethernet/intel/fm10k/Makefile
index b006ff66d028..cac645329cea 100644
--- a/drivers/net/ethernet/intel/fm10k/Makefile
+++ b/drivers/net/ethernet/intel/fm10k/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
-# Intel Ethernet Switch Host Interface Driver
-# Copyright(c) 2013 - 2015 Intel Corporation.
+# Intel(R) Ethernet Switch Host Interface Driver
+# Copyright(c) 2013 - 2016 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
@@ -22,7 +22,7 @@
################################################################################
#
-# Makefile for the Intel(R) FM10000 Ethernet Switch Host Interface driver
+# Makefile for the Intel(R) Ethernet Switch Host Interface Driver
#
obj-$(CONFIG_FM10K) += fm10k.o
@@ -30,7 +30,6 @@ obj-$(CONFIG_FM10K) += fm10k.o
fm10k-y := fm10k_main.o \
fm10k_common.o \
fm10k_pci.o \
- fm10k_ptp.o \
fm10k_netdev.o \
fm10k_ethtool.o \
fm10k_pf.o \
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k.h b/drivers/net/ethernet/intel/fm10k/fm10k.h
index b34bb008b104..c4cf08dcf5af 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k.h
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -27,9 +27,6 @@
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
-#include <linux/net_tstamp.h>
-#include <linux/clocksource.h>
-#include <linux/ptp_clock_kernel.h>
#include "fm10k_pf.h"
#include "fm10k_vf.h"
@@ -262,12 +259,12 @@ struct fm10k_intfc {
unsigned long state;
u32 flags;
-#define FM10K_FLAG_RESET_REQUESTED (u32)(1 << 0)
-#define FM10K_FLAG_RSS_FIELD_IPV4_UDP (u32)(1 << 1)
-#define FM10K_FLAG_RSS_FIELD_IPV6_UDP (u32)(1 << 2)
-#define FM10K_FLAG_RX_TS_ENABLED (u32)(1 << 3)
-#define FM10K_FLAG_SWPRI_CONFIG (u32)(1 << 4)
-#define FM10K_FLAG_DEBUG_STATS (u32)(1 << 5)
+#define FM10K_FLAG_RESET_REQUESTED (u32)(BIT(0))
+#define FM10K_FLAG_RSS_FIELD_IPV4_UDP (u32)(BIT(1))
+#define FM10K_FLAG_RSS_FIELD_IPV6_UDP (u32)(BIT(2))
+#define FM10K_FLAG_RX_TS_ENABLED (u32)(BIT(3))
+#define FM10K_FLAG_SWPRI_CONFIG (u32)(BIT(4))
+#define FM10K_FLAG_DEBUG_STATS (u32)(BIT(5))
int xcast_mode;
/* Tx fast path data */
@@ -333,6 +330,7 @@ struct fm10k_intfc {
unsigned long last_reset;
unsigned long link_down_event;
bool host_ready;
+ bool lport_map_failed;
u32 reta[FM10K_RETA_SIZE];
u32 rssrk[FM10K_RSSRK_SIZE];
@@ -342,22 +340,8 @@ struct fm10k_intfc {
#ifdef CONFIG_DEBUG_FS
struct dentry *dbg_intfc;
-
#endif /* CONFIG_DEBUG_FS */
- struct ptp_clock_info ptp_caps;
- struct ptp_clock *ptp_clock;
-
- struct sk_buff_head ts_tx_skb_queue;
- u32 tx_hwtstamp_timeouts;
- struct hwtstamp_config ts_config;
- /* We are unable to actually adjust the clock beyond the frequency
- * value. Once the clock is started there is no resetting it. As
- * such we maintain a separate offset from the actual hardware clock
- * to allow for offset adjustment.
- */
- s64 ptp_adjust;
- rwlock_t systime_lock;
#ifdef CONFIG_DCB
u8 pfc_en;
#endif
@@ -378,6 +362,7 @@ enum fm10k_state_t {
__FM10K_SERVICE_DISABLE,
__FM10K_MBX_LOCK,
__FM10K_LINK_DOWN,
+ __FM10K_UPDATING_STATS,
};
static inline void fm10k_mbx_lock(struct fm10k_intfc *interface)
@@ -422,7 +407,7 @@ static inline u16 fm10k_desc_unused(struct fm10k_ring *ring)
(&(((union fm10k_rx_desc *)((R)->desc))[i]))
#define FM10K_MAX_TXD_PWR 14
-#define FM10K_MAX_DATA_PER_TXD BIT(FM10K_MAX_TXD_PWR)
+#define FM10K_MAX_DATA_PER_TXD (1u << FM10K_MAX_TXD_PWR)
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), FM10K_MAX_DATA_PER_TXD)
@@ -473,6 +458,7 @@ __be16 fm10k_tx_encap_offload(struct sk_buff *skb);
netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
struct fm10k_ring *tx_ring);
void fm10k_tx_timeout_reset(struct fm10k_intfc *interface);
+u64 fm10k_get_tx_pending(struct fm10k_ring *ring);
bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring);
void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count);
@@ -510,6 +496,8 @@ int fm10k_close(struct net_device *netdev);
/* Ethtool */
void fm10k_set_ethtool_ops(struct net_device *dev);
+u32 fm10k_get_reta_size(struct net_device *netdev);
+void fm10k_write_reta(struct fm10k_intfc *interface, const u32 *indir);
/* IOV */
s32 fm10k_iov_event(struct fm10k_intfc *interface);
@@ -544,21 +532,6 @@ static inline void fm10k_dbg_init(void) {}
static inline void fm10k_dbg_exit(void) {}
#endif /* CONFIG_DEBUG_FS */
-/* Time Stamping */
-void fm10k_systime_to_hwtstamp(struct fm10k_intfc *interface,
- struct skb_shared_hwtstamps *hwtstamp,
- u64 systime);
-void fm10k_ts_tx_enqueue(struct fm10k_intfc *interface, struct sk_buff *skb);
-void fm10k_ts_tx_hwtstamp(struct fm10k_intfc *interface, __le16 dglort,
- u64 systime);
-void fm10k_ts_reset(struct fm10k_intfc *interface);
-void fm10k_ts_init(struct fm10k_intfc *interface);
-void fm10k_ts_tx_subtask(struct fm10k_intfc *interface);
-void fm10k_ptp_register(struct fm10k_intfc *interface);
-void fm10k_ptp_unregister(struct fm10k_intfc *interface);
-int fm10k_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
-int fm10k_set_ts_config(struct net_device *netdev, struct ifreq *ifr);
-
/* DCB */
#ifdef CONFIG_DCB
void fm10k_dcbnl_set_ops(struct net_device *dev);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.c b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
index 6cfae6ac04ea..d6baaea8bc7c 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_common.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -519,8 +519,12 @@ s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready)
goto out;
/* interface cannot receive traffic without logical ports */
- if (mac->dglort_map == FM10K_DGLORTMAP_NONE)
+ if (mac->dglort_map == FM10K_DGLORTMAP_NONE) {
+ if (hw->mac.ops.request_lport_map)
+ ret_val = hw->mac.ops.request_lport_map(hw);
+
goto out;
+ }
/* if we passed all the tests above then the switch is ready and we no
* longer need to check for link
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.h b/drivers/net/ethernet/intel/fm10k/fm10k_common.h
index 45e4e5b1f20a..50f71e997448 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_common.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.h
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
index 2be4361839db..db4bd8bf9722 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_dcbnl.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
index 5d6137faf7d1..5116fd043630 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_debugfs.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
index 2f6a05b57228..c04cbe9c9f7c 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -76,20 +76,9 @@ static const struct fm10k_stats fm10k_gstrings_global_stats[] = {
FM10K_STAT("mac_rules_used", hw.swapi.mac.used),
FM10K_STAT("mac_rules_avail", hw.swapi.mac.avail),
- FM10K_STAT("tx_hang_count", tx_timeout_count),
-
- FM10K_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
-};
+ FM10K_STAT("reset_while_pending", hw.mac.reset_while_pending),
-static const struct fm10k_stats fm10k_gstrings_debug_stats[] = {
- FM10K_STAT("hw_sm_mbx_full", hw_sm_mbx_full),
- FM10K_STAT("hw_csum_tx_good", hw_csum_tx_good),
- FM10K_STAT("hw_csum_rx_good", hw_csum_rx_good),
- FM10K_STAT("rx_switch_errors", rx_switch_errors),
- FM10K_STAT("rx_drops", rx_drops),
- FM10K_STAT("rx_pp_errors", rx_pp_errors),
- FM10K_STAT("rx_link_errors", rx_link_errors),
- FM10K_STAT("rx_length_errors", rx_length_errors),
+ FM10K_STAT("tx_hang_count", tx_timeout_count),
};
static const struct fm10k_stats fm10k_gstrings_pf_stats[] = {
@@ -121,13 +110,21 @@ static const struct fm10k_stats fm10k_gstrings_mbx_stats[] = {
FM10K_MBX_STAT("mbx_rx_mbmem_pushed", rx_mbmem_pushed),
};
+#define FM10K_QUEUE_STAT(_name, _stat) { \
+ .stat_string = _name, \
+ .sizeof_stat = FIELD_SIZEOF(struct fm10k_ring, _stat), \
+ .stat_offset = offsetof(struct fm10k_ring, _stat) \
+}
+
+static const struct fm10k_stats fm10k_gstrings_queue_stats[] = {
+ FM10K_QUEUE_STAT("packets", stats.packets),
+ FM10K_QUEUE_STAT("bytes", stats.bytes),
+};
+
#define FM10K_GLOBAL_STATS_LEN ARRAY_SIZE(fm10k_gstrings_global_stats)
-#define FM10K_DEBUG_STATS_LEN ARRAY_SIZE(fm10k_gstrings_debug_stats)
#define FM10K_PF_STATS_LEN ARRAY_SIZE(fm10k_gstrings_pf_stats)
#define FM10K_MBX_STATS_LEN ARRAY_SIZE(fm10k_gstrings_mbx_stats)
-
-#define FM10K_QUEUE_STATS_LEN(_n) \
- ((_n) * 2 * (sizeof(struct fm10k_queue_stats) / sizeof(u64)))
+#define FM10K_QUEUE_STATS_LEN ARRAY_SIZE(fm10k_gstrings_queue_stats)
#define FM10K_STATIC_STATS_LEN (FM10K_GLOBAL_STATS_LEN + \
FM10K_NETDEV_STATS_LEN + \
@@ -145,77 +142,56 @@ enum fm10k_self_test_types {
};
enum {
- FM10K_PRV_FLAG_DEBUG_STATS,
FM10K_PRV_FLAG_LEN,
};
static const char fm10k_prv_flags[FM10K_PRV_FLAG_LEN][ETH_GSTRING_LEN] = {
- "debug-statistics",
};
+static void fm10k_add_stat_strings(char **p, const char *prefix,
+ const struct fm10k_stats stats[],
+ const unsigned int size)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i++) {
+ snprintf(*p, ETH_GSTRING_LEN, "%s%s",
+ prefix, stats[i].stat_string);
+ *p += ETH_GSTRING_LEN;
+ }
+}
+
static void fm10k_get_stat_strings(struct net_device *dev, u8 *data)
{
struct fm10k_intfc *interface = netdev_priv(dev);
- struct fm10k_iov_data *iov_data = interface->iov_data;
char *p = (char *)data;
unsigned int i;
- unsigned int j;
- for (i = 0; i < FM10K_NETDEV_STATS_LEN; i++) {
- memcpy(p, fm10k_gstrings_net_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
+ fm10k_add_stat_strings(&p, "", fm10k_gstrings_net_stats,
+ FM10K_NETDEV_STATS_LEN);
- for (i = 0; i < FM10K_GLOBAL_STATS_LEN; i++) {
- memcpy(p, fm10k_gstrings_global_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
+ fm10k_add_stat_strings(&p, "", fm10k_gstrings_global_stats,
+ FM10K_GLOBAL_STATS_LEN);
- if (interface->flags & FM10K_FLAG_DEBUG_STATS) {
- for (i = 0; i < FM10K_DEBUG_STATS_LEN; i++) {
- memcpy(p, fm10k_gstrings_debug_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- }
+ fm10k_add_stat_strings(&p, "", fm10k_gstrings_mbx_stats,
+ FM10K_MBX_STATS_LEN);
- for (i = 0; i < FM10K_MBX_STATS_LEN; i++) {
- memcpy(p, fm10k_gstrings_mbx_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
+ if (interface->hw.mac.type != fm10k_mac_vf)
+ fm10k_add_stat_strings(&p, "", fm10k_gstrings_pf_stats,
+ FM10K_PF_STATS_LEN);
- if (interface->hw.mac.type != fm10k_mac_vf) {
- for (i = 0; i < FM10K_PF_STATS_LEN; i++) {
- memcpy(p, fm10k_gstrings_pf_stats[i].stat_string,
- ETH_GSTRING_LEN);
- p += ETH_GSTRING_LEN;
- }
- }
+ for (i = 0; i < interface->hw.mac.max_queues; i++) {
+ char prefix[ETH_GSTRING_LEN];
- if ((interface->flags & FM10K_FLAG_DEBUG_STATS) && iov_data) {
- for (i = 0; i < iov_data->num_vfs; i++) {
- for (j = 0; j < FM10K_MBX_STATS_LEN; j++) {
- snprintf(p,
- ETH_GSTRING_LEN,
- "vf_%u_%s", i,
- fm10k_gstrings_mbx_stats[j].stat_string);
- p += ETH_GSTRING_LEN;
- }
- }
- }
+ snprintf(prefix, ETH_GSTRING_LEN, "tx_queue_%u_", i);
+ fm10k_add_stat_strings(&p, prefix,
+ fm10k_gstrings_queue_stats,
+ FM10K_QUEUE_STATS_LEN);
- for (i = 0; i < interface->hw.mac.max_queues; i++) {
- snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_packets", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "tx_queue_%u_bytes", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_packets", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_bytes", i);
- p += ETH_GSTRING_LEN;
+ snprintf(prefix, ETH_GSTRING_LEN, "rx_queue_%u_", i);
+ fm10k_add_stat_strings(&p, prefix,
+ fm10k_gstrings_queue_stats,
+ FM10K_QUEUE_STATS_LEN);
}
}
@@ -242,7 +218,6 @@ static void fm10k_get_strings(struct net_device *dev,
static int fm10k_get_sset_count(struct net_device *dev, int sset)
{
struct fm10k_intfc *interface = netdev_priv(dev);
- struct fm10k_iov_data *iov_data = interface->iov_data;
struct fm10k_hw *hw = &interface->hw;
int stats_len = FM10K_STATIC_STATS_LEN;
@@ -250,19 +225,11 @@ static int fm10k_get_sset_count(struct net_device *dev, int sset)
case ETH_SS_TEST:
return FM10K_TEST_LEN;
case ETH_SS_STATS:
- stats_len += FM10K_QUEUE_STATS_LEN(hw->mac.max_queues);
+ stats_len += hw->mac.max_queues * 2 * FM10K_QUEUE_STATS_LEN;
if (hw->mac.type != fm10k_mac_vf)
stats_len += FM10K_PF_STATS_LEN;
- if (interface->flags & FM10K_FLAG_DEBUG_STATS) {
- stats_len += FM10K_DEBUG_STATS_LEN;
-
- if (iov_data)
- stats_len += FM10K_MBX_STATS_LEN *
- iov_data->num_vfs;
- }
-
return stats_len;
case ETH_SS_PRIV_FLAGS:
return FM10K_PRV_FLAG_LEN;
@@ -271,93 +238,80 @@ static int fm10k_get_sset_count(struct net_device *dev, int sset)
}
}
-static void fm10k_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats __always_unused *stats,
- u64 *data)
+static void fm10k_add_ethtool_stats(u64 **data, void *pointer,
+ const struct fm10k_stats stats[],
+ const unsigned int size)
{
- const int stat_count = sizeof(struct fm10k_queue_stats) / sizeof(u64);
- struct fm10k_intfc *interface = netdev_priv(netdev);
- struct fm10k_iov_data *iov_data = interface->iov_data;
- struct net_device_stats *net_stats = &netdev->stats;
+ unsigned int i;
char *p;
- int i, j;
- fm10k_update_stats(interface);
-
- for (i = 0; i < FM10K_NETDEV_STATS_LEN; i++) {
- p = (char *)net_stats + fm10k_gstrings_net_stats[i].stat_offset;
- *(data++) = (fm10k_gstrings_net_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ if (!pointer) {
+ /* memory is not zero allocated so we have to clear it */
+ for (i = 0; i < size; i++)
+ *((*data)++) = 0;
+ return;
}
- for (i = 0; i < FM10K_GLOBAL_STATS_LEN; i++) {
- p = (char *)interface +
- fm10k_gstrings_global_stats[i].stat_offset;
- *(data++) = (fm10k_gstrings_global_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
+ for (i = 0; i < size; i++) {
+ p = (char *)pointer + stats[i].stat_offset;
- if (interface->flags & FM10K_FLAG_DEBUG_STATS) {
- for (i = 0; i < FM10K_DEBUG_STATS_LEN; i++) {
- p = (char *)interface +
- fm10k_gstrings_debug_stats[i].stat_offset;
- *(data++) = (fm10k_gstrings_debug_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ switch (stats[i].sizeof_stat) {
+ case sizeof(u64):
+ *((*data)++) = *(u64 *)p;
+ break;
+ case sizeof(u32):
+ *((*data)++) = *(u32 *)p;
+ break;
+ case sizeof(u16):
+ *((*data)++) = *(u16 *)p;
+ break;
+ case sizeof(u8):
+ *((*data)++) = *(u8 *)p;
+ break;
+ default:
+ *((*data)++) = 0;
}
}
+}
- for (i = 0; i < FM10K_MBX_STATS_LEN; i++) {
- p = (char *)&interface->hw.mbx +
- fm10k_gstrings_mbx_stats[i].stat_offset;
- *(data++) = (fm10k_gstrings_mbx_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
+static void fm10k_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats __always_unused *stats,
+ u64 *data)
+{
+ struct fm10k_intfc *interface = netdev_priv(netdev);
+ struct net_device_stats *net_stats = &netdev->stats;
+ int i;
- if (interface->hw.mac.type != fm10k_mac_vf) {
- for (i = 0; i < FM10K_PF_STATS_LEN; i++) {
- p = (char *)interface +
- fm10k_gstrings_pf_stats[i].stat_offset;
- *(data++) = (fm10k_gstrings_pf_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
- }
+ fm10k_update_stats(interface);
- if ((interface->flags & FM10K_FLAG_DEBUG_STATS) && iov_data) {
- for (i = 0; i < iov_data->num_vfs; i++) {
- struct fm10k_vf_info *vf_info;
+ fm10k_add_ethtool_stats(&data, net_stats, fm10k_gstrings_net_stats,
+ FM10K_NETDEV_STATS_LEN);
- vf_info = &iov_data->vf_info[i];
+ fm10k_add_ethtool_stats(&data, interface, fm10k_gstrings_global_stats,
+ FM10K_GLOBAL_STATS_LEN);
- /* skip stats if we don't have a vf info */
- if (!vf_info) {
- data += FM10K_MBX_STATS_LEN;
- continue;
- }
+ fm10k_add_ethtool_stats(&data, &interface->hw.mbx,
+ fm10k_gstrings_mbx_stats,
+ FM10K_MBX_STATS_LEN);
- for (j = 0; j < FM10K_MBX_STATS_LEN; j++) {
- p = (char *)&vf_info->mbx +
- fm10k_gstrings_mbx_stats[j].stat_offset;
- *(data++) = (fm10k_gstrings_mbx_stats[j].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
- }
+ if (interface->hw.mac.type != fm10k_mac_vf) {
+ fm10k_add_ethtool_stats(&data, interface,
+ fm10k_gstrings_pf_stats,
+ FM10K_PF_STATS_LEN);
}
for (i = 0; i < interface->hw.mac.max_queues; i++) {
struct fm10k_ring *ring;
- u64 *queue_stat;
ring = interface->tx_ring[i];
- if (ring)
- queue_stat = (u64 *)&ring->stats;
- for (j = 0; j < stat_count; j++)
- *(data++) = ring ? queue_stat[j] : 0;
+ fm10k_add_ethtool_stats(&data, ring,
+ fm10k_gstrings_queue_stats,
+ FM10K_QUEUE_STATS_LEN);
ring = interface->rx_ring[i];
- if (ring)
- queue_stat = (u64 *)&ring->stats;
- for (j = 0; j < stat_count; j++)
- *(data++) = ring ? queue_stat[j] : 0;
+ fm10k_add_ethtool_stats(&data, ring,
+ fm10k_gstrings_queue_stats,
+ FM10K_QUEUE_STATS_LEN);
}
}
@@ -425,7 +379,7 @@ static void fm10k_get_regs(struct net_device *netdev,
u32 *buff = p;
u16 i;
- regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
+ regs->version = BIT(24) | (hw->revision_id << 16) | hw->device_id;
switch (hw->mac.type) {
case fm10k_mac_pf:
@@ -935,15 +889,15 @@ static int fm10k_mbx_test(struct fm10k_intfc *interface, u64 *data)
struct fm10k_mbx_info *mbx = &hw->mbx;
u32 attr_flag, test_msg[6];
unsigned long timeout;
- int err;
+ int err = -EINVAL;
/* For now this is a VF only feature */
if (hw->mac.type != fm10k_mac_vf)
return 0;
/* loop through both nested and unnested attribute types */
- for (attr_flag = (1 << FM10K_TEST_MSG_UNSET);
- attr_flag < (1 << (2 * FM10K_TEST_MSG_NESTED));
+ for (attr_flag = BIT(FM10K_TEST_MSG_UNSET);
+ attr_flag < BIT(2 * FM10K_TEST_MSG_NESTED);
attr_flag += attr_flag) {
/* generate message to be tested */
fm10k_tlv_msg_test_create(test_msg, attr_flag);
@@ -1001,35 +955,57 @@ static void fm10k_self_test(struct net_device *dev,
static u32 fm10k_get_priv_flags(struct net_device *netdev)
{
- struct fm10k_intfc *interface = netdev_priv(netdev);
- u32 priv_flags = 0;
-
- if (interface->flags & FM10K_FLAG_DEBUG_STATS)
- priv_flags |= 1 << FM10K_PRV_FLAG_DEBUG_STATS;
-
- return priv_flags;
+ return 0;
}
static int fm10k_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
- struct fm10k_intfc *interface = netdev_priv(netdev);
-
- if (priv_flags >= (1 << FM10K_PRV_FLAG_LEN))
+ if (priv_flags >= BIT(FM10K_PRV_FLAG_LEN))
return -EINVAL;
- if (priv_flags & (1 << FM10K_PRV_FLAG_DEBUG_STATS))
- interface->flags |= FM10K_FLAG_DEBUG_STATS;
- else
- interface->flags &= ~FM10K_FLAG_DEBUG_STATS;
-
return 0;
}
-static u32 fm10k_get_reta_size(struct net_device __always_unused *netdev)
+u32 fm10k_get_reta_size(struct net_device __always_unused *netdev)
{
return FM10K_RETA_SIZE * FM10K_RETA_ENTRIES_PER_REG;
}
+void fm10k_write_reta(struct fm10k_intfc *interface, const u32 *indir)
+{
+ u16 rss_i = interface->ring_feature[RING_F_RSS].indices;
+ struct fm10k_hw *hw = &interface->hw;
+ u32 table[4];
+ int i, j;
+
+ /* record entries to reta table */
+ for (i = 0; i < FM10K_RETA_SIZE; i++) {
+ u32 reta, n;
+
+ /* generate a new table if we weren't given one */
+ for (j = 0; j < 4; j++) {
+ if (indir)
+ n = indir[4 * i + j];
+ else
+ n = ethtool_rxfh_indir_default(4 * i + j,
+ rss_i);
+
+ table[j] = n;
+ }
+
+ reta = table[0] |
+ (table[1] << 8) |
+ (table[2] << 16) |
+ (table[3] << 24);
+
+ if (interface->reta[i] == reta)
+ continue;
+
+ interface->reta[i] = reta;
+ fm10k_write_reg(hw, FM10K_RETA(0, i), reta);
+ }
+}
+
static int fm10k_get_reta(struct net_device *netdev, u32 *indir)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
@@ -1053,7 +1029,6 @@ static int fm10k_get_reta(struct net_device *netdev, u32 *indir)
static int fm10k_set_reta(struct net_device *netdev, const u32 *indir)
{
struct fm10k_intfc *interface = netdev_priv(netdev);
- struct fm10k_hw *hw = &interface->hw;
int i;
u16 rss_i;
@@ -1068,19 +1043,7 @@ static int fm10k_set_reta(struct net_device *netdev, const u32 *indir)
return -EINVAL;
}
- /* record entries to reta table */
- for (i = 0; i < FM10K_RETA_SIZE; i++, indir += 4) {
- u32 reta = indir[0] |
- (indir[1] << 8) |
- (indir[2] << 16) |
- (indir[3] << 24);
-
- if (interface->reta[i] == reta)
- continue;
-
- interface->reta[i] = reta;
- fm10k_write_reg(hw, FM10K_RETA(0, i), reta);
- }
+ fm10k_write_reta(interface, indir);
return 0;
}
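fm10k_write_reta() above packs four 8-bit RSS queue indices into each 32-bit RETA register, lowest entry in the least significant byte. A worked example as a sketch, with a hypothetical helper:

/* pack four 8-bit queue indices into one RETA register value */
static u32 demo_pack_reta(const u8 q[4])
{
	return q[0] | (q[1] << 8) | (q[2] << 16) | ((u32)q[3] << 24);
}

/* demo_pack_reta((const u8 []){ 2, 5, 1, 7 }) == 0x07010502 */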
@@ -1145,7 +1108,7 @@ static unsigned int fm10k_max_channels(struct net_device *dev)
/* For QoS report channels per traffic class */
if (tcs > 1)
- max_combined = 1 << (fls(max_combined / tcs) - 1);
+ max_combined = BIT(fls(max_combined / tcs) - 1);
return max_combined;
}
@@ -1192,33 +1155,6 @@ static int fm10k_set_channels(struct net_device *dev,
return fm10k_setup_tc(dev, netdev_get_num_tc(dev));
}
-static int fm10k_get_ts_info(struct net_device *dev,
- struct ethtool_ts_info *info)
-{
- struct fm10k_intfc *interface = netdev_priv(dev);
-
- info->so_timestamping =
- SOF_TIMESTAMPING_TX_SOFTWARE |
- SOF_TIMESTAMPING_RX_SOFTWARE |
- SOF_TIMESTAMPING_SOFTWARE |
- SOF_TIMESTAMPING_TX_HARDWARE |
- SOF_TIMESTAMPING_RX_HARDWARE |
- SOF_TIMESTAMPING_RAW_HARDWARE;
-
- if (interface->ptp_clock)
- info->phc_index = ptp_clock_index(interface->ptp_clock);
- else
- info->phc_index = -1;
-
- info->tx_types = (1 << HWTSTAMP_TX_OFF) |
- (1 << HWTSTAMP_TX_ON);
-
- info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_ALL);
-
- return 0;
-}
-
static const struct ethtool_ops fm10k_ethtool_ops = {
.get_strings = fm10k_get_strings,
.get_sset_count = fm10k_get_sset_count,
@@ -1246,7 +1182,6 @@ static const struct ethtool_ops fm10k_ethtool_ops = {
.set_rxfh = fm10k_set_rssh,
.get_channels = fm10k_get_channels,
.set_channels = fm10k_set_channels,
- .get_ts_info = fm10k_get_ts_info,
};
void fm10k_set_ethtool_ops(struct net_device *dev)
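fm10k_add_ethtool_stats() above collapses four near-identical copy loops into one table walk that dispatches on the recorded field width. A condensed sketch of the table-driven pattern, with hypothetical names and kernel integer types assumed:

struct demo_stat {
	const char *name;
	int size;	/* sizeof() the field */
	int offset;	/* offsetof() within the owning struct */
};

static void demo_add_stats(u64 **data, const void *base,
			   const struct demo_stat *stats, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		const char *p = (const char *)base + stats[i].offset;

		switch (stats[i].size) {
		case sizeof(u64):
			*((*data)++) = *(const u64 *)p;
			break;
		case sizeof(u32):
			*((*data)++) = *(const u32 *)p;
			break;
		default:
			*((*data)++) = 0;	/* unknown width, report zero */
			break;
		}
	}
}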
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
index acfb8b1f88a7..47f0743ec03b 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_iov.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -50,7 +50,7 @@ s32 fm10k_iov_event(struct fm10k_intfc *interface)
s64 vflre;
int i;
- /* if there is no iov_data then there is no mailboxes to process */
+ /* if there is no iov_data then there is no mailbox to process */
if (!ACCESS_ONCE(interface->iov_data))
return 0;
@@ -98,7 +98,7 @@ s32 fm10k_iov_mbx(struct fm10k_intfc *interface)
struct fm10k_iov_data *iov_data;
int i;
- /* if there is no iov_data then there is no mailboxes to process */
+ /* if there is no iov_data then there is no mailbox to process */
if (!ACCESS_ONCE(interface->iov_data))
return 0;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 4de17db3808c..e9767b6366a8 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -28,16 +28,16 @@
#include "fm10k.h"
-#define DRV_VERSION "0.19.3-k"
+#define DRV_VERSION "0.21.2-k"
+#define DRV_SUMMARY "Intel(R) Ethernet Switch Host Interface Driver"
const char fm10k_driver_version[] = DRV_VERSION;
char fm10k_driver_name[] = "fm10k";
-static const char fm10k_driver_string[] =
- "Intel(R) Ethernet Switch Host Interface Driver";
+static const char fm10k_driver_string[] = DRV_SUMMARY;
static const char fm10k_copyright[] =
- "Copyright (c) 2013 Intel Corporation.";
+ "Copyright (c) 2013 - 2016 Intel Corporation.";
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
-MODULE_DESCRIPTION("Intel(R) Ethernet Switch Host Interface Driver");
+MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
@@ -56,7 +56,7 @@ static int __init fm10k_init_module(void)
pr_info("%s\n", fm10k_copyright);
/* create driver workqueue */
- fm10k_workqueue = create_workqueue("fm10k");
+ fm10k_workqueue = alloc_workqueue("fm10k", WQ_MEM_RECLAIM, 0);
fm10k_dbg_init();
@@ -77,7 +77,6 @@ static void __exit fm10k_exit_module(void)
fm10k_dbg_exit();
/* destroy driver workqueue */
- flush_workqueue(fm10k_workqueue);
destroy_workqueue(fm10k_workqueue);
}
module_exit(fm10k_exit_module);
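create_workqueue() is a legacy wrapper; the switch to alloc_workqueue("fm10k", WQ_MEM_RECLAIM, 0) states explicitly that the service task may be needed under memory pressure (WQ_MEM_RECLAIM guarantees a rescuer thread), and the flush_workqueue() call before destroy is dropped because destroy_workqueue() already drains all pending work. A minimal sketch of the lifecycle, hypothetical names:

static struct workqueue_struct *demo_wq;

static int __init demo_init(void)
{
	/* max_active of 0 selects the default concurrency limit */
	demo_wq = alloc_workqueue("demo", WQ_MEM_RECLAIM, 0);
	return demo_wq ? 0 : -ENOMEM;
}

static void __exit demo_exit(void)
{
	destroy_workqueue(demo_wq);	/* drains queued work, then frees */
}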
@@ -272,7 +271,7 @@ static bool fm10k_add_rx_frag(struct fm10k_rx_buffer *rx_buffer,
#if (PAGE_SIZE < 8192)
unsigned int truesize = FM10K_RX_BUFSZ;
#else
- unsigned int truesize = SKB_DATA_ALIGN(size);
+ unsigned int truesize = ALIGN(size, 512);
#endif
unsigned int pull_len;
@@ -401,10 +400,10 @@ static inline void fm10k_rx_checksum(struct fm10k_ring *ring,
}
#define FM10K_RSS_L4_TYPES_MASK \
- ((1ul << FM10K_RSSTYPE_IPV4_TCP) | \
- (1ul << FM10K_RSSTYPE_IPV4_UDP) | \
- (1ul << FM10K_RSSTYPE_IPV6_TCP) | \
- (1ul << FM10K_RSSTYPE_IPV6_UDP))
+ (BIT(FM10K_RSSTYPE_IPV4_TCP) | \
+ BIT(FM10K_RSSTYPE_IPV4_UDP) | \
+ BIT(FM10K_RSSTYPE_IPV6_TCP) | \
+ BIT(FM10K_RSSTYPE_IPV6_UDP))
static inline void fm10k_rx_hash(struct fm10k_ring *ring,
union fm10k_rx_desc *rx_desc,
@@ -420,23 +419,10 @@ static inline void fm10k_rx_hash(struct fm10k_ring *ring,
return;
skb_set_hash(skb, le32_to_cpu(rx_desc->d.rss),
- (FM10K_RSS_L4_TYPES_MASK & (1ul << rss_type)) ?
+ (BIT(rss_type) & FM10K_RSS_L4_TYPES_MASK) ?
PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}
-static void fm10k_rx_hwtstamp(struct fm10k_ring *rx_ring,
- union fm10k_rx_desc *rx_desc,
- struct sk_buff *skb)
-{
- struct fm10k_intfc *interface = rx_ring->q_vector->interface;
-
- FM10K_CB(skb)->tstamp = rx_desc->q.timestamp;
-
- if (unlikely(interface->flags & FM10K_FLAG_RX_TS_ENABLED))
- fm10k_systime_to_hwtstamp(interface, skb_hwtstamps(skb),
- le64_to_cpu(rx_desc->q.timestamp));
-}
-
static void fm10k_type_trans(struct fm10k_ring *rx_ring,
union fm10k_rx_desc __maybe_unused *rx_desc,
struct sk_buff *skb)
@@ -486,8 +472,6 @@ static unsigned int fm10k_process_skb_fields(struct fm10k_ring *rx_ring,
fm10k_rx_checksum(rx_ring, rx_desc, skb);
- fm10k_rx_hwtstamp(rx_ring, rx_desc, skb);
-
FM10K_CB(skb)->fi.w.vlan = rx_desc->w.vlan;
skb_record_rx_queue(skb, rx_ring->queue_index);
@@ -835,6 +819,8 @@ static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
struct ipv6hdr *ipv6;
u8 *raw;
} network_hdr;
+ u8 *transport_hdr;
+ __be16 frag_off;
__be16 protocol;
u8 l4_hdr = 0;
@@ -852,9 +838,11 @@ static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
goto no_csum;
}
network_hdr.raw = skb_inner_network_header(skb);
+ transport_hdr = skb_inner_transport_header(skb);
} else {
protocol = vlan_get_protocol(skb);
network_hdr.raw = skb_network_header(skb);
+ transport_hdr = skb_transport_header(skb);
}
switch (protocol) {
@@ -863,15 +851,17 @@ static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
break;
case htons(ETH_P_IPV6):
l4_hdr = network_hdr.ipv6->nexthdr;
+ if (likely((transport_hdr - network_hdr.raw) ==
+ sizeof(struct ipv6hdr)))
+ break;
+ ipv6_skip_exthdr(skb, network_hdr.raw - skb->data +
+ sizeof(struct ipv6hdr),
+ &l4_hdr, &frag_off);
+ if (unlikely(frag_off))
+ l4_hdr = NEXTHDR_FRAGMENT;
break;
default:
- if (unlikely(net_ratelimit())) {
- dev_warn(tx_ring->dev,
- "partial checksum but ip version=%x!\n",
- protocol);
- }
- tx_ring->tx_stats.csum_err++;
- goto no_csum;
+ break;
}
switch (l4_hdr) {
@@ -884,9 +874,10 @@ static void fm10k_tx_csum(struct fm10k_ring *tx_ring,
default:
if (unlikely(net_ratelimit())) {
dev_warn(tx_ring->dev,
- "partial checksum but l4 proto=%x!\n",
- l4_hdr);
+ "partial checksum, version=%d l4 proto=%x\n",
+ protocol, l4_hdr);
}
+ skb_checksum_help(skb);
tx_ring->tx_stats.csum_err++;
goto no_csum;
}
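On the IPv6 path above, ipv6_skip_exthdr() walks any extension headers after the fixed 40-byte IPv6 header and returns the final next-header value plus a fragment offset; a nonzero frag_off marks a fragment, which the code maps to NEXTHDR_FRAGMENT. Unsupported protocols now fall through to skb_checksum_help(), so the checksum is computed in software rather than left unfinished when the hardware offload cannot handle the packet.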
@@ -912,11 +903,6 @@ static u8 fm10k_tx_desc_flags(struct sk_buff *skb, u32 tx_flags)
/* set type for advanced descriptor with frame checksum insertion */
u32 desc_flags = 0;
- /* set timestamping bits */
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
- likely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
- desc_flags |= FM10K_TXD_FLAG_TIME;
-
/* set checksum offload bits */
desc_flags |= FM10K_SET_FLAG(tx_flags, FM10K_TX_FLAGS_CSUM,
FM10K_TXD_FLAG_CSUM);
@@ -1142,11 +1128,13 @@ static u64 fm10k_get_tx_completed(struct fm10k_ring *ring)
return ring->stats.packets;
}
-static u64 fm10k_get_tx_pending(struct fm10k_ring *ring)
+u64 fm10k_get_tx_pending(struct fm10k_ring *ring)
{
- /* use SW head and tail until we have real hardware */
- u32 head = ring->next_to_clean;
- u32 tail = ring->next_to_use;
+ struct fm10k_intfc *interface = ring->q_vector->interface;
+ struct fm10k_hw *hw = &interface->hw;
+
+ u32 head = fm10k_read_reg(hw, FM10K_TDH(ring->reg_idx));
+ u32 tail = fm10k_read_reg(hw, FM10K_TDT(ring->reg_idx));
return ((head <= tail) ? tail : tail + ring->count) - head;
}
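The rewritten fm10k_get_tx_pending() reads the hardware head and tail registers instead of the software shadow copies; the closing expression handles tail wrapping past the end of the ring. For example, with count = 256, head = 250 and tail = 10, head > tail, so pending = (10 + 256) - 250 = 16 descriptors.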
@@ -1198,9 +1186,10 @@ void fm10k_tx_timeout_reset(struct fm10k_intfc *interface)
* fm10k_clean_tx_irq - Reclaim resources after transmit completes
* @q_vector: structure containing interrupt and ring information
* @tx_ring: tx ring to clean
+ * @napi_budget: Used to determine if we are in netpoll
**/
static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
- struct fm10k_ring *tx_ring)
+ struct fm10k_ring *tx_ring, int napi_budget)
{
struct fm10k_intfc *interface = q_vector->interface;
struct fm10k_tx_buffer *tx_buffer;
@@ -1238,7 +1227,7 @@ static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
total_packets += tx_buffer->gso_segs;
/* free the skb */
- dev_consume_skb_any(tx_buffer->skb);
+ napi_consume_skb(tx_buffer->skb, napi_budget);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -1409,7 +1398,7 @@ static void fm10k_update_itr(struct fm10k_ring_container *ring_container)
* accounts for changes in the ITR due to PCIe link speed.
*/
itr_round = ACCESS_ONCE(ring_container->itr_scale) + 8;
- avg_wire_size += (1 << itr_round) - 1;
+ avg_wire_size += BIT(itr_round) - 1;
avg_wire_size >>= itr_round;
/* write back value and retain adaptive flag */
@@ -1449,8 +1438,10 @@ static int fm10k_poll(struct napi_struct *napi, int budget)
int per_ring_budget, work_done = 0;
bool clean_complete = true;
- fm10k_for_each_ring(ring, q_vector->tx)
- clean_complete &= fm10k_clean_tx_irq(q_vector, ring);
+ fm10k_for_each_ring(ring, q_vector->tx) {
+ if (!fm10k_clean_tx_irq(q_vector, ring, budget))
+ clean_complete = false;
+ }
/* Handle case where we are called by netpoll with a budget of 0 */
if (budget <= 0)
@@ -1468,7 +1459,8 @@ static int fm10k_poll(struct napi_struct *napi, int budget)
int work = fm10k_clean_rx_irq(q_vector, ring, per_ring_budget);
work_done += work;
- clean_complete &= !!(work < per_ring_budget);
+ if (work >= per_ring_budget)
+ clean_complete = false;
}
/* If all work not completed, return budget and keep polling */
@@ -1511,17 +1503,17 @@ static bool fm10k_set_qos_queues(struct fm10k_intfc *interface)
/* set QoS mask and indices */
f = &interface->ring_feature[RING_F_QOS];
f->indices = pcs;
- f->mask = (1 << fls(pcs - 1)) - 1;
+ f->mask = BIT(fls(pcs - 1)) - 1;
/* determine the upper limit for our current DCB mode */
rss_i = interface->hw.mac.max_queues / pcs;
- rss_i = 1 << (fls(rss_i) - 1);
+ rss_i = BIT(fls(rss_i) - 1);
/* set RSS mask and indices */
f = &interface->ring_feature[RING_F_RSS];
rss_i = min_t(u16, rss_i, f->limit);
f->indices = rss_i;
- f->mask = (1 << fls(rss_i - 1)) - 1;
+ f->mask = BIT(fls(rss_i - 1)) - 1;
/* configure pause class to queue mapping */
for (i = 0; i < pcs; i++)
@@ -1551,7 +1543,7 @@ static bool fm10k_set_rss_queues(struct fm10k_intfc *interface)
/* record indices and power of 2 mask for RSS */
f->indices = rss_i;
- f->mask = (1 << fls(rss_i - 1)) - 1;
+ f->mask = BIT(fls(rss_i - 1)) - 1;
interface->num_rx_queues = rss_i;
interface->num_tx_queues = rss_i;
@@ -1572,17 +1564,29 @@ static bool fm10k_set_rss_queues(struct fm10k_intfc *interface)
**/
static void fm10k_set_num_queues(struct fm10k_intfc *interface)
{
- /* Start with base case */
- interface->num_rx_queues = 1;
- interface->num_tx_queues = 1;
-
+ /* Attempt to setup QoS and RSS first */
if (fm10k_set_qos_queues(interface))
return;
+ /* If we don't have QoS, just fallback to only RSS. */
fm10k_set_rss_queues(interface);
}
/**
+ * fm10k_reset_num_queues - Reset the number of queues to zero
+ * @interface: board private structure
+ *
+ * This function should be called whenever we need to reset the number of
+ * queues after an error condition.
+ */
+static void fm10k_reset_num_queues(struct fm10k_intfc *interface)
+{
+ interface->num_tx_queues = 0;
+ interface->num_rx_queues = 0;
+ interface->num_q_vectors = 0;
+}
+
+/**
* fm10k_alloc_q_vector - Allocate memory for a single interrupt vector
* @interface: board private structure to initialize
* @v_count: q_vectors allocated on interface, used for ring interleaving
@@ -1765,9 +1769,7 @@ static int fm10k_alloc_q_vectors(struct fm10k_intfc *interface)
return 0;
err_out:
- interface->num_tx_queues = 0;
- interface->num_rx_queues = 0;
- interface->num_q_vectors = 0;
+ fm10k_reset_num_queues(interface);
while (v_idx--)
fm10k_free_q_vector(interface, v_idx);
@@ -1787,9 +1789,7 @@ static void fm10k_free_q_vectors(struct fm10k_intfc *interface)
{
int v_idx = interface->num_q_vectors;
- interface->num_tx_queues = 0;
- interface->num_rx_queues = 0;
- interface->num_q_vectors = 0;
+ fm10k_reset_num_queues(interface);
while (v_idx--)
fm10k_free_q_vector(interface, v_idx);
@@ -1858,7 +1858,7 @@ static int fm10k_init_msix_capability(struct fm10k_intfc *interface)
if (v_budget < 0) {
kfree(interface->msix_entries);
interface->msix_entries = NULL;
- return -ENOMEM;
+ return v_budget;
}
/* record the number of queues available for q_vectors */
@@ -1935,7 +1935,7 @@ static void fm10k_assign_rings(struct fm10k_intfc *interface)
static void fm10k_init_reta(struct fm10k_intfc *interface)
{
u16 i, rss_i = interface->ring_feature[RING_F_RSS].indices;
- u32 reta, base;
+ u32 reta;
/* If the Rx flow indirection table has been configured manually, we
* need to maintain it when possible.
@@ -1960,21 +1960,7 @@ static void fm10k_init_reta(struct fm10k_intfc *interface)
}
repopulate_reta:
- /* Populate the redirection table 4 entries at a time. To do this
- * we are generating the results for n and n+2 and then interleaving
- * those with the results with n+1 and n+3.
- */
- for (i = FM10K_RETA_SIZE; i--;) {
- /* first pass generates n and n+2 */
- base = ((i * 0x00040004) + 0x00020000) * rss_i;
- reta = (base & 0x3F803F80) >> 7;
-
- /* second pass generates n+1 and n+3 */
- base += 0x00010001 * rss_i;
- reta |= (base & 0x3F803F80) << 1;
-
- interface->reta[i] = reta;
- }
+ fm10k_write_reta(interface, NULL);
}
/**
@@ -1997,14 +1983,15 @@ int fm10k_init_queueing_scheme(struct fm10k_intfc *interface)
if (err) {
dev_err(&interface->pdev->dev,
"Unable to initialize MSI-X capability\n");
- return err;
+ goto err_init_msix;
}
/* Allocate memory for queues */
err = fm10k_alloc_q_vectors(interface);
if (err) {
- fm10k_reset_msix_capability(interface);
- return err;
+ dev_err(&interface->pdev->dev,
+ "Unable to allocate queue vectors\n");
+ goto err_alloc_q_vectors;
}
/* Map rings to devices, and map devices to physical queues */
@@ -2014,6 +2001,12 @@ int fm10k_init_queueing_scheme(struct fm10k_intfc *interface)
fm10k_init_reta(interface);
return 0;
+
+err_alloc_q_vectors:
+ fm10k_reset_msix_capability(interface);
+err_init_msix:
+ fm10k_reset_num_queues(interface);
+ return err;
}
/**
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
index 98202c3d591c..c9dfa6564fcf 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h
index 245a0a3dc32e..35c1dbad1330 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_mbx.h
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -41,6 +41,8 @@ struct fm10k_mbx_info;
#define FM10K_MBX_ACK_INTERRUPT 0x00000010
#define FM10K_MBX_INTERRUPT_ENABLE 0x00000020
#define FM10K_MBX_INTERRUPT_DISABLE 0x00000040
+#define FM10K_MBX_GLOBAL_REQ_INTERRUPT 0x00000200
+#define FM10K_MBX_GLOBAL_ACK_INTERRUPT 0x00000400
#define FM10K_MBICR(_n) ((_n) + 0x18840)
#define FM10K_GMBX 0x18842
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index d09a8dd71fc2..20a5bbe3f536 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -20,9 +20,7 @@
#include "fm10k.h"
#include <linux/vmalloc.h>
-#ifdef CONFIG_FM10K_VXLAN
-#include <net/vxlan.h>
-#endif /* CONFIG_FM10K_VXLAN */
+#include <net/udp_tunnel.h>
/**
* fm10k_setup_tx_resources - allocate Tx resources (Descriptors)
@@ -243,9 +241,6 @@ void fm10k_clean_all_tx_rings(struct fm10k_intfc *interface)
for (i = 0; i < interface->num_tx_queues; i++)
fm10k_clean_tx_ring(interface->tx_ring[i]);
-
- /* remove any stale timestamp buffers and free them */
- skb_queue_purge(&interface->ts_tx_skb_queue);
}
/**
@@ -437,28 +432,30 @@ static void fm10k_restore_vxlan_port(struct fm10k_intfc *interface)
/**
* fm10k_add_vxlan_port
* @netdev: network interface device structure
- * @sa_family: Address family of new port
- * @port: port number used for VXLAN
+ * @ti: Tunnel endpoint information
*
- * This funciton is called when a new VXLAN interface has added a new port
+ * This function is called when a new VXLAN interface has added a new port
* number to the range that is currently in use for VXLAN. The new port
* number is always added to the tail so that the port number list should
* match the order in which the ports were allocated. The head of the list
* is always used as the VXLAN port number for offloads.
**/
static void fm10k_add_vxlan_port(struct net_device *dev,
- sa_family_t sa_family, __be16 port) {
+ struct udp_tunnel_info *ti)
+{
struct fm10k_intfc *interface = netdev_priv(dev);
struct fm10k_vxlan_port *vxlan_port;
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
/* only the PF supports configuring tunnels */
if (interface->hw.mac.type != fm10k_mac_pf)
return;
/* existing ports are pulled out so our new entry is always last */
fm10k_vxlan_port_for_each(vxlan_port, interface) {
- if ((vxlan_port->port == port) &&
- (vxlan_port->sa_family == sa_family)) {
+ if ((vxlan_port->port == ti->port) &&
+ (vxlan_port->sa_family == ti->sa_family)) {
list_del(&vxlan_port->list);
goto insert_tail;
}
@@ -468,8 +465,8 @@ static void fm10k_add_vxlan_port(struct net_device *dev,
vxlan_port = kmalloc(sizeof(*vxlan_port), GFP_ATOMIC);
if (!vxlan_port)
return;
- vxlan_port->port = port;
- vxlan_port->sa_family = sa_family;
+ vxlan_port->port = ti->port;
+ vxlan_port->sa_family = ti->sa_family;
insert_tail:
/* add new port value to list */
@@ -481,26 +478,28 @@ insert_tail:
/**
* fm10k_del_vxlan_port
* @netdev: network interface device structure
- * @sa_family: Address family of freed port
- * @port: port number used for VXLAN
+ * @ti: Tunnel endpoint information
*
- * This funciton is called when a new VXLAN interface has freed a port
+ * This function is called when a new VXLAN interface has freed a port
* number from the range that is currently in use for VXLAN. The freed
* port is removed from the list and the new head is used to determine
* the port number for offloads.
**/
static void fm10k_del_vxlan_port(struct net_device *dev,
- sa_family_t sa_family, __be16 port) {
+ struct udp_tunnel_info *ti)
+{
struct fm10k_intfc *interface = netdev_priv(dev);
struct fm10k_vxlan_port *vxlan_port;
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
if (interface->hw.mac.type != fm10k_mac_pf)
return;
/* find the port in the list and free it */
fm10k_vxlan_port_for_each(vxlan_port, interface) {
- if ((vxlan_port->port == port) &&
- (vxlan_port->sa_family == sa_family)) {
+ if ((vxlan_port->port == ti->port) &&
+ (vxlan_port->sa_family == ti->sa_family)) {
list_del(&vxlan_port->list);
kfree(vxlan_port);
break;
@@ -556,10 +555,8 @@ int fm10k_open(struct net_device *netdev)
if (err)
goto err_set_queues;
-#ifdef CONFIG_FM10K_VXLAN
/* update VXLAN port configuration */
- vxlan_get_rx_port(netdev);
-#endif
+ udp_tunnel_get_rx_info(netdev);
fm10k_up(interface);
@@ -660,10 +657,6 @@ static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev)
__skb_put(skb, pad_len);
}
- /* prepare packet for hardware time stamping */
- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
- fm10k_ts_tx_enqueue(interface, skb);
-
if (r_idx >= interface->num_tx_queues)
r_idx %= interface->num_tx_queues;
@@ -884,7 +877,7 @@ static int __fm10k_uc_sync(struct net_device *dev,
return -EADDRNOTAVAIL;
/* update table with current entries */
- for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0;
+ for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1;
vid < VLAN_N_VID;
vid = fm10k_find_next_vlan(interface, vid)) {
err = hw->mac.ops.update_uc_addr(hw, glort, addr,
@@ -947,7 +940,7 @@ static int __fm10k_mc_sync(struct net_device *dev,
u16 vid, glort = interface->glort;
/* update table with current entries */
- for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0;
+ for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1;
vid < VLAN_N_VID;
vid = fm10k_find_next_vlan(interface, vid)) {
hw->mac.ops.update_mc_addr(hw, glort, addr, vid, sync);
@@ -1002,11 +995,8 @@ static void fm10k_set_rx_mode(struct net_device *dev)
}
/* synchronize all of the addresses */
- if (xcast_mode != FM10K_XCAST_MODE_PROMISC) {
- __dev_uc_sync(dev, fm10k_uc_sync, fm10k_uc_unsync);
- if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI)
- __dev_mc_sync(dev, fm10k_mc_sync, fm10k_mc_unsync);
- }
+ __dev_uc_sync(dev, fm10k_uc_sync, fm10k_uc_unsync);
+ __dev_mc_sync(dev, fm10k_mc_sync, fm10k_mc_unsync);
fm10k_mbx_unlock(interface);
}
@@ -1044,7 +1034,7 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
hw->mac.ops.update_vlan(hw, 0, 0, true);
/* update table with current entries */
- for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 0;
+ for (vid = hw->mac.default_vid ? fm10k_find_next_vlan(interface, 0) : 1;
vid < VLAN_N_VID;
vid = fm10k_find_next_vlan(interface, vid)) {
hw->mac.ops.update_vlan(hw, vid, 0, true);
@@ -1056,11 +1046,8 @@ void fm10k_restore_rx_state(struct fm10k_intfc *interface)
hw->mac.ops.update_xcast_mode(hw, glort, xcast_mode);
/* synchronize all of the addresses */
- if (xcast_mode != FM10K_XCAST_MODE_PROMISC) {
- __dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync);
- if (xcast_mode != FM10K_XCAST_MODE_ALLMULTI)
- __dev_mc_sync(netdev, fm10k_mc_sync, fm10k_mc_unsync);
- }
+ __dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync);
+ __dev_mc_sync(netdev, fm10k_mc_sync, fm10k_mc_unsync);
fm10k_mbx_unlock(interface);
@@ -1213,18 +1200,6 @@ static int __fm10k_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
return fm10k_setup_tc(dev, tc->tc);
}
-static int fm10k_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
-{
- switch (cmd) {
- case SIOCGHWTSTAMP:
- return fm10k_get_ts_config(netdev, ifr);
- case SIOCSHWTSTAMP:
- return fm10k_set_ts_config(netdev, ifr);
- default:
- return -EOPNOTSUPP;
- }
-}
-
static void fm10k_assign_l2_accel(struct fm10k_intfc *interface,
struct fm10k_l2_accel *l2_accel)
{
@@ -1400,9 +1375,8 @@ static const struct net_device_ops fm10k_netdev_ops = {
.ndo_set_vf_vlan = fm10k_ndo_set_vf_vlan,
.ndo_set_vf_rate = fm10k_ndo_set_vf_bw,
.ndo_get_vf_config = fm10k_ndo_get_vf_config,
- .ndo_add_vxlan_port = fm10k_add_vxlan_port,
- .ndo_del_vxlan_port = fm10k_del_vxlan_port,
- .ndo_do_ioctl = fm10k_ioctl,
+ .ndo_udp_tunnel_add = fm10k_add_vxlan_port,
+ .ndo_udp_tunnel_del = fm10k_del_vxlan_port,
.ndo_dfwd_add_station = fm10k_dfwd_add_station,
.ndo_dfwd_del_station = fm10k_dfwd_del_station,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1429,7 +1403,7 @@ struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info)
/* configure default debug level */
interface = netdev_priv(dev);
- interface->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+ interface->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
/* configure default features */
dev->features |= NETIF_F_IP_CSUM |
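With the move from ndo_add_vxlan_port/ndo_del_vxlan_port to the generic ndo_udp_tunnel_add/ndo_udp_tunnel_del hooks, the core passes a struct udp_tunnel_info and replays all known ports through udp_tunnel_get_rx_info() when the device opens, so each driver filters on the tunnel type itself. A sketch of the handler shape, hypothetical names:

static void demo_udp_tunnel_add(struct net_device *dev,
				struct udp_tunnel_info *ti)
{
	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;	/* this hardware only offloads VXLAN */

	/* record ti->port and ti->sa_family, then program the device */
}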
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index 4eb7a6fa6b0d..774a5654bf42 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -99,7 +99,7 @@ void fm10k_service_event_schedule(struct fm10k_intfc *interface)
static void fm10k_service_event_complete(struct fm10k_intfc *interface)
{
- BUG_ON(!test_bit(__FM10K_SERVICE_SCHED, &interface->state));
+ WARN_ON(!test_bit(__FM10K_SERVICE_SCHED, &interface->state));
/* flush memory to make sure state is correct before next watchdog */
smp_mb__before_atomic();
@@ -123,11 +123,24 @@ static void fm10k_service_timer(unsigned long data)
static void fm10k_detach_subtask(struct fm10k_intfc *interface)
{
struct net_device *netdev = interface->netdev;
+ u32 __iomem *hw_addr;
+ u32 value;
/* do nothing if device is still present or hw_addr is set */
if (netif_device_present(netdev) || interface->hw.hw_addr)
return;
+ /* check the real address space to see if we've recovered */
+ hw_addr = READ_ONCE(interface->uc_addr);
+ value = readl(hw_addr);
+ if (~value) {
+ interface->hw.hw_addr = interface->uc_addr;
+ netif_device_attach(netdev);
+ interface->flags |= FM10K_FLAG_RESET_REQUESTED;
+ netdev_warn(netdev, "PCIe link restored, device now attached\n");
+ return;
+ }
+
rtnl_lock();
if (netif_running(netdev))
@@ -136,16 +149,14 @@ static void fm10k_detach_subtask(struct fm10k_intfc *interface)
rtnl_unlock();
}
-static void fm10k_reinit(struct fm10k_intfc *interface)
+static void fm10k_prepare_for_reset(struct fm10k_intfc *interface)
{
struct net_device *netdev = interface->netdev;
- struct fm10k_hw *hw = &interface->hw;
- int err;
WARN_ON(in_interrupt());
/* put off any impending NetWatchDogTimeout */
- netdev->trans_start = jiffies;
+ netif_trans_update(netdev);
while (test_and_set_bit(__FM10K_RESETTING, &interface->state))
usleep_range(1000, 2000);
@@ -165,6 +176,19 @@ static void fm10k_reinit(struct fm10k_intfc *interface)
/* delay any future reset requests */
interface->last_reset = jiffies + (10 * HZ);
+ rtnl_unlock();
+}
+
+static int fm10k_handle_reset(struct fm10k_intfc *interface)
+{
+ struct net_device *netdev = interface->netdev;
+ struct fm10k_hw *hw = &interface->hw;
+ int err;
+
+ rtnl_lock();
+
+ pci_set_master(interface->pdev);
+
/* reset and initialize the hardware so it is in a known state */
err = hw->mac.ops.reset_hw(hw);
if (err) {
@@ -185,7 +209,7 @@ static void fm10k_reinit(struct fm10k_intfc *interface)
goto reinit_err;
}
- /* reassociate interrupts */
+ /* re-associate interrupts */
err = fm10k_mbx_request_irq(interface);
if (err)
goto err_mbx_irq;
@@ -209,9 +233,6 @@ static void fm10k_reinit(struct fm10k_intfc *interface)
netdev->features |= NETIF_F_HW_VLAN_CTAG_RX;
}
- /* reset clock */
- fm10k_ts_reset(interface);
-
err = netif_running(netdev) ? fm10k_open(netdev) : 0;
if (err)
goto err_open;
@@ -222,7 +243,7 @@ static void fm10k_reinit(struct fm10k_intfc *interface)
clear_bit(__FM10K_RESETTING, &interface->state);
- return;
+ return err;
err_open:
fm10k_mbx_free_irq(interface);
err_mbx_irq:
@@ -233,6 +254,20 @@ reinit_err:
rtnl_unlock();
clear_bit(__FM10K_RESETTING, &interface->state);
+
+ return err;
+}
+
+static void fm10k_reinit(struct fm10k_intfc *interface)
+{
+ int err;
+
+ fm10k_prepare_for_reset(interface);
+
+ err = fm10k_handle_reset(interface);
+ if (err)
+ dev_err(&interface->pdev->dev,
+ "fm10k_handle_reset failed: %d\n", err);
}
static void fm10k_reset_subtask(struct fm10k_intfc *interface)
@@ -375,12 +410,19 @@ void fm10k_update_stats(struct fm10k_intfc *interface)
u64 bytes, pkts;
int i;
+ /* ensure only one thread updates stats at a time */
+ if (test_and_set_bit(__FM10K_UPDATING_STATS, &interface->state))
+ return;
+
/* do not allow stats update via service task for next second */
interface->next_stats_update = jiffies + HZ;
/* gather some stats to the interface struct that are per queue */
for (bytes = 0, pkts = 0, i = 0; i < interface->num_tx_queues; i++) {
- struct fm10k_ring *tx_ring = interface->tx_ring[i];
+ struct fm10k_ring *tx_ring = READ_ONCE(interface->tx_ring[i]);
+
+ if (!tx_ring)
+ continue;
restart_queue += tx_ring->tx_stats.restart_queue;
tx_busy += tx_ring->tx_stats.tx_busy;
@@ -399,7 +441,10 @@ void fm10k_update_stats(struct fm10k_intfc *interface)
/* gather some stats to the interface struct that are per queue */
for (bytes = 0, pkts = 0, i = 0; i < interface->num_rx_queues; i++) {
- struct fm10k_ring *rx_ring = interface->rx_ring[i];
+ struct fm10k_ring *rx_ring = READ_ONCE(interface->rx_ring[i]);
+
+ if (!rx_ring)
+ continue;
bytes += rx_ring->stats.bytes;
pkts += rx_ring->stats.packets;
@@ -446,6 +491,8 @@ void fm10k_update_stats(struct fm10k_intfc *interface)
/* Fill out the OS statistics structure */
net_stats->rx_errors = rx_errors;
net_stats->rx_dropped = interface->stats.nodesc_drop.count;
+
+ clear_bit(__FM10K_UPDATING_STATS, &interface->state);
}
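Taken together, the statistics changes above implement a single-updater discipline: a state bit serializes update attempts while READ_ONCE() snapshots ring pointers that a concurrent reset may tear down. A minimal sketch of the pattern, where accumulate_ring_stats() is a hypothetical stand-in for the per-ring arithmetic:

/* Single-updater sketch; accumulate_ring_stats() is hypothetical. */
static void example_update_stats(struct fm10k_intfc *interface)
{
	int i;

	/* only one context may update stats at a time */
	if (test_and_set_bit(__FM10K_UPDATING_STATS, &interface->state))
		return;

	for (i = 0; i < interface->num_tx_queues; i++) {
		struct fm10k_ring *ring = READ_ONCE(interface->tx_ring[i]);

		if (!ring)	/* ring freed by an in-flight reset */
			continue;
		accumulate_ring_stats(ring);
	}

	clear_bit(__FM10K_UPDATING_STATS, &interface->state);
}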
/**
@@ -559,7 +606,6 @@ static void fm10k_service_task(struct work_struct *work)
/* tasks only run when interface is up */
fm10k_watchdog_subtask(interface);
fm10k_check_hang_subtask(interface);
- fm10k_ts_tx_subtask(interface);
/* release lock on service events to allow scheduling next event */
fm10k_service_event_complete(interface);
@@ -579,7 +625,7 @@ static void fm10k_configure_tx_ring(struct fm10k_intfc *interface,
u64 tdba = ring->dma;
u32 size = ring->count * sizeof(struct fm10k_tx_desc);
u32 txint = FM10K_INT_MAP_DISABLE;
- u32 txdctl = FM10K_TXDCTL_ENABLE | (1 << FM10K_TXDCTL_MAX_TIME_SHIFT);
+ u32 txdctl = BIT(FM10K_TXDCTL_MAX_TIME_SHIFT) | FM10K_TXDCTL_ENABLE;
u8 reg_idx = ring->reg_idx;
/* disable queue to avoid issues while updating state */
@@ -730,7 +776,7 @@ static void fm10k_configure_rx_ring(struct fm10k_intfc *interface,
if (interface->pfc_en)
rx_pause = interface->pfc_en;
#endif
- if (!(rx_pause & (1 << ring->qos_pc)))
+ if (!(rx_pause & BIT(ring->qos_pc)))
rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;
fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl);
@@ -779,7 +825,7 @@ void fm10k_update_rx_drop_en(struct fm10k_intfc *interface)
u32 rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
u8 reg_idx = ring->reg_idx;
- if (!(rx_pause & (1 << ring->qos_pc)))
+ if (!(rx_pause & BIT(ring->qos_pc)))
rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;
fm10k_write_reg(hw, FM10K_RXDCTL(reg_idx), rxdctl);
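The (1 << n) to BIT(n) conversions throughout this patch carry a small correctness benefit in addition to style: BIT() shifts an unsigned long, so expressions such as BIT(31) in the hunk below avoid the undefined behaviour of shifting a signed int into its sign bit. Assuming the usual definition from linux/bitops.h:

#define BIT(nr)	(1UL << (nr))	/* unsigned shift, per linux/bitops.h */

u32 high = BIT(31);	/* well-defined; the signed (1 << 31) is not */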
@@ -903,8 +949,8 @@ static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data)
/* re-enable mailbox interrupt and indicate 20us delay */
fm10k_write_reg(hw, FM10K_VFITR(FM10K_MBX_VECTOR),
- FM10K_ITR_ENABLE | (FM10K_MBX_INT_DELAY >>
- hw->mac.itr_scale));
+ (FM10K_MBX_INT_DELAY >> hw->mac.itr_scale) |
+ FM10K_ITR_ENABLE);
/* service upstream mailbox */
if (fm10k_mbx_trylock(interface)) {
@@ -1065,7 +1111,7 @@ static void fm10k_reset_drop_on_empty(struct fm10k_intfc *interface, u32 eicr)
if (maxholdq)
fm10k_write_reg(hw, FM10K_MAXHOLDQ(7), maxholdq);
for (q = 255;;) {
- if (maxholdq & (1 << 31)) {
+ if (maxholdq & BIT(31)) {
if (q < FM10K_MAX_QUEUES_PF) {
interface->rx_overrun_pf++;
fm10k_write_reg(hw, FM10K_RXDCTL(q), rxdctl);
@@ -1135,22 +1181,24 @@ static irqreturn_t fm10k_msix_mbx_pf(int __always_unused irq, void *data)
/* re-enable mailbox interrupt and indicate 20us delay */
fm10k_write_reg(hw, FM10K_ITR(FM10K_MBX_VECTOR),
- FM10K_ITR_ENABLE | (FM10K_MBX_INT_DELAY >>
- hw->mac.itr_scale));
+ (FM10K_MBX_INT_DELAY >> hw->mac.itr_scale) |
+ FM10K_ITR_ENABLE);
return IRQ_HANDLED;
}
void fm10k_mbx_free_irq(struct fm10k_intfc *interface)
{
- struct msix_entry *entry = &interface->msix_entries[FM10K_MBX_VECTOR];
struct fm10k_hw *hw = &interface->hw;
+ struct msix_entry *entry;
int itr_reg;
/* no mailbox IRQ to free if MSI-X is not enabled */
if (!interface->msix_entries)
return;
+ entry = &interface->msix_entries[FM10K_MBX_VECTOR];
+
/* disconnect the mailbox */
hw->mbx.ops.disconnect(hw, &hw->mbx);
@@ -1202,25 +1250,6 @@ static s32 fm10k_mbx_mac_addr(struct fm10k_hw *hw, u32 **results,
return 0;
}
-static s32 fm10k_1588_msg_vf(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info __always_unused *mbx)
-{
- struct fm10k_intfc *interface;
- u64 timestamp;
- s32 err;
-
- err = fm10k_tlv_attr_get_u64(results[FM10K_1588_MSG_TIMESTAMP],
- &timestamp);
- if (err)
- return err;
-
- interface = container_of(hw, struct fm10k_intfc, hw);
-
- fm10k_ts_tx_hwtstamp(interface, 0, timestamp);
-
- return 0;
-}
-
/* generic error handler for mailbox issues */
static s32 fm10k_mbx_error(struct fm10k_hw *hw, u32 **results,
struct fm10k_mbx_info __always_unused *mbx)
@@ -1241,7 +1270,6 @@ static const struct fm10k_msg_data vf_mbx_data[] = {
FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_mbx_mac_addr),
FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
- FM10K_VF_MSG_1588_HANDLER(fm10k_1588_msg_vf),
FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error),
};
@@ -1253,7 +1281,7 @@ static int fm10k_mbx_request_irq_vf(struct fm10k_intfc *interface)
int err;
/* Use timer0 for interrupt moderation on the mailbox */
- u32 itr = FM10K_INT_MAP_TIMER0 | entry->entry;
+ u32 itr = entry->entry | FM10K_INT_MAP_TIMER0;
/* register mailbox handlers */
err = hw->mbx.ops.register_handlers(&hw->mbx, vf_mbx_data);
@@ -1285,11 +1313,40 @@ static s32 fm10k_lport_map(struct fm10k_hw *hw, u32 **results,
u32 dglort_map = hw->mac.dglort_map;
s32 err;
+ interface = container_of(hw, struct fm10k_intfc, hw);
+
+ err = fm10k_msg_err_pf(hw, results, mbx);
+ if (!err && hw->swapi.status) {
+ /* force link down for a reasonable delay */
+ interface->link_down_event = jiffies + (2 * HZ);
+ set_bit(__FM10K_LINK_DOWN, &interface->state);
+
+ /* reset dglort_map back to no config */
+ hw->mac.dglort_map = FM10K_DGLORTMAP_NONE;
+
+ fm10k_service_event_schedule(interface);
+
+ /* prevent overloading kernel message buffer */
+ if (interface->lport_map_failed)
+ return 0;
+
+ interface->lport_map_failed = true;
+
+ if (hw->swapi.status == FM10K_MSG_ERR_PEP_NOT_SCHEDULED)
+ dev_warn(&interface->pdev->dev,
+ "cannot obtain link because the host interface is configured for a PCIe host interface bandwidth of zero\n");
+ dev_warn(&interface->pdev->dev,
+ "request logical port map failed: %d\n",
+ hw->swapi.status);
+
+ return 0;
+ }
+
err = fm10k_msg_lport_map_pf(hw, results, mbx);
if (err)
return err;
- interface = container_of(hw, struct fm10k_intfc, hw);
+ interface->lport_map_failed = false;
/* we need to reset if port count was just updated */
if (dglort_map != hw->mac.dglort_map)
@@ -1339,68 +1396,6 @@ static s32 fm10k_update_pvid(struct fm10k_hw *hw, u32 **results,
return 0;
}
-static s32 fm10k_1588_msg_pf(struct fm10k_hw *hw, u32 **results,
- struct fm10k_mbx_info __always_unused *mbx)
-{
- struct fm10k_swapi_1588_timestamp timestamp;
- struct fm10k_iov_data *iov_data;
- struct fm10k_intfc *interface;
- u16 sglort, vf_idx;
- s32 err;
-
- err = fm10k_tlv_attr_get_le_struct(
- results[FM10K_PF_ATTR_ID_1588_TIMESTAMP],
- &timestamp, sizeof(timestamp));
- if (err)
- return err;
-
- interface = container_of(hw, struct fm10k_intfc, hw);
-
- if (timestamp.dglort) {
- fm10k_ts_tx_hwtstamp(interface, timestamp.dglort,
- le64_to_cpu(timestamp.egress));
- return 0;
- }
-
- /* either dglort or sglort must be set */
- if (!timestamp.sglort)
- return FM10K_ERR_PARAM;
-
- /* verify GLORT is at least one of the ones we own */
- sglort = le16_to_cpu(timestamp.sglort);
- if (!fm10k_glort_valid_pf(hw, sglort))
- return FM10K_ERR_PARAM;
-
- if (sglort == interface->glort) {
- fm10k_ts_tx_hwtstamp(interface, 0,
- le64_to_cpu(timestamp.ingress));
- return 0;
- }
-
- /* if there is no iov_data then there is no mailboxes to process */
- if (!ACCESS_ONCE(interface->iov_data))
- return FM10K_ERR_PARAM;
-
- rcu_read_lock();
-
- /* notify VF if this timestamp belongs to it */
- iov_data = interface->iov_data;
- vf_idx = (hw->mac.dglort_map & FM10K_DGLORTMAP_NONE) - sglort;
-
- if (!iov_data || vf_idx >= iov_data->num_vfs) {
- err = FM10K_ERR_PARAM;
- goto err_unlock;
- }
-
- err = hw->iov.ops.report_timestamp(hw, &iov_data->vf_info[vf_idx],
- le64_to_cpu(timestamp.ingress));
-
-err_unlock:
- rcu_read_unlock();
-
- return err;
-}
-
static const struct fm10k_msg_data pf_mbx_data[] = {
FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
@@ -1408,7 +1403,6 @@ static const struct fm10k_msg_data pf_mbx_data[] = {
FM10K_PF_MSG_ERR_HANDLER(LPORT_CREATE, fm10k_msg_err_pf),
FM10K_PF_MSG_ERR_HANDLER(LPORT_DELETE, fm10k_msg_err_pf),
FM10K_PF_MSG_UPDATE_PVID_HANDLER(fm10k_update_pvid),
- FM10K_PF_MSG_1588_TIMESTAMP_HANDLER(fm10k_1588_msg_pf),
FM10K_TLV_MSG_ERROR_HANDLER(fm10k_mbx_error),
};
@@ -1420,8 +1414,8 @@ static int fm10k_mbx_request_irq_pf(struct fm10k_intfc *interface)
int err;
/* Use timer0 for interrupt moderation on the mailbox */
- u32 mbx_itr = FM10K_INT_MAP_TIMER0 | entry->entry;
- u32 other_itr = FM10K_INT_MAP_IMMEDIATE | entry->entry;
+ u32 mbx_itr = entry->entry | FM10K_INT_MAP_TIMER0;
+ u32 other_itr = entry->entry | FM10K_INT_MAP_IMMEDIATE;
/* register mailbox handlers */
err = hw->mbx.ops.register_handlers(&hw->mbx, pf_mbx_data);
@@ -1622,6 +1616,9 @@ void fm10k_up(struct fm10k_intfc *interface)
/* configure interrupts */
hw->mac.ops.update_int_moderator(hw);
+ /* enable statistics capture again */
+ clear_bit(__FM10K_UPDATING_STATS, &interface->state);
+
/* clear down bit to indicate we are ready to go */
clear_bit(__FM10K_DOWN, &interface->state);
@@ -1654,9 +1651,11 @@ void fm10k_down(struct fm10k_intfc *interface)
{
struct net_device *netdev = interface->netdev;
struct fm10k_hw *hw = &interface->hw;
+ int err, i = 0, count = 0;
/* signal that we are down to the interrupt handler and service task */
- set_bit(__FM10K_DOWN, &interface->state);
+ if (test_and_set_bit(__FM10K_DOWN, &interface->state))
+ return;
/* call carrier off first to avoid false dev_watchdog timeouts */
netif_carrier_off(netdev);
@@ -1668,17 +1667,58 @@ void fm10k_down(struct fm10k_intfc *interface)
/* reset Rx filters */
fm10k_reset_rx_state(interface);
- /* allow 10ms for device to quiesce */
- usleep_range(10000, 20000);
-
/* disable polling routines */
fm10k_napi_disable_all(interface);
/* capture stats one last time before stopping interface */
fm10k_update_stats(interface);
+ /* prevent updating statistics while we're down */
+ while (test_and_set_bit(__FM10K_UPDATING_STATS, &interface->state))
+ usleep_range(1000, 2000);
+
+ /* skip waiting for TX DMA if we lost PCIe link */
+ if (FM10K_REMOVED(hw->hw_addr))
+ goto skip_tx_dma_drain;
+
+ /* In some rare circumstances it can take a while for Tx queues to
+ * quiesce and be fully disabled. Attempt to .stop_hw() first, and
+ * then if we get ERR_REQUESTS_PENDING, go ahead and wait in a loop
+ * until the Tx queues have emptied, or until a number of retries has
+ * been exhausted. If we fail to clear within the retry loop, we will
+ * issue a warning indicating that Tx DMA is probably hung. Note this
+ * means we call .stop_hw() twice, but this shouldn't cause any
+ * problems.
+ */
+ err = hw->mac.ops.stop_hw(hw);
+ if (err != FM10K_ERR_REQUESTS_PENDING)
+ goto skip_tx_dma_drain;
+
+#define TX_DMA_DRAIN_RETRIES 25
+ for (count = 0; count < TX_DMA_DRAIN_RETRIES; count++) {
+ usleep_range(10000, 20000);
+
+ /* start checking at the last ring to have pending Tx */
+ for (; i < interface->num_tx_queues; i++)
+ if (fm10k_get_tx_pending(interface->tx_ring[i]))
+ break;
+
+ /* if all the queues are drained, we can break now */
+ if (i == interface->num_tx_queues)
+ break;
+ }
+
+ if (count >= TX_DMA_DRAIN_RETRIES)
+ dev_err(&interface->pdev->dev,
+ "Tx queues failed to drain after %d tries. Tx DMA is probably hung.\n",
+ count);
+skip_tx_dma_drain:
/* Disable DMA engine for Tx/Rx */
- hw->mac.ops.stop_hw(hw);
+ err = hw->mac.ops.stop_hw(hw);
+ if (err == FM10K_ERR_REQUESTS_PENDING)
+ dev_err(&interface->pdev->dev,
+ "due to pending requests hw was not shut down gracefully\n");
+ else if (err)
+ dev_err(&interface->pdev->dev, "stop_hw failed: %d\n", err);
/* free any buffers still on the rings */
fm10k_clean_all_tx_rings(interface);
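A subtlety in the drain logic above is that the ring index i is declared outside the retry loop and never reset, so each pass resumes scanning at the first ring still reporting pending descriptors. Condensed to its shape (names from the patch; this is a sketch, not a second copy to compile):

/* Bounded-retry poll: i persists across iterations so each retry
 * resumes at the last ring that still had Tx descriptors pending.
 */
for (count = 0; count < TX_DMA_DRAIN_RETRIES; count++) {
	usleep_range(10000, 20000);

	for (; i < interface->num_tx_queues; i++)
		if (fm10k_get_tx_pending(interface->tx_ring[i]))
			break;

	if (i == interface->num_tx_queues)
		break;	/* every queue has drained */
}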
@@ -1776,35 +1816,17 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,
netdev->addr_assign_type |= NET_ADDR_RANDOM;
}
- memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
- memcpy(netdev->perm_addr, hw->mac.addr, netdev->addr_len);
+ ether_addr_copy(netdev->dev_addr, hw->mac.addr);
+ ether_addr_copy(netdev->perm_addr, hw->mac.addr);
if (!is_valid_ether_addr(netdev->perm_addr)) {
dev_err(&pdev->dev, "Invalid MAC Address\n");
return -EIO;
}
- /* assign BAR 4 resources for use with PTP */
- if (fm10k_read_reg(hw, FM10K_CTRL) & FM10K_CTRL_BAR4_ALLOWED)
- interface->sw_addr = ioremap(pci_resource_start(pdev, 4),
- pci_resource_len(pdev, 4));
- hw->sw_addr = interface->sw_addr;
-
/* initialize DCBNL interface */
fm10k_dcbnl_set_ops(netdev);
- /* Initialize service timer and service task */
- set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
- setup_timer(&interface->service_timer, &fm10k_service_timer,
- (unsigned long)interface);
- INIT_WORK(&interface->service_task, fm10k_service_task);
-
- /* kick off service timer now, even when interface is down */
- mod_timer(&interface->service_timer, (HZ * 2) + jiffies);
-
- /* Intitialize timestamp data */
- fm10k_ts_init(interface);
-
/* set default ring sizes */
interface->tx_ring_count = FM10K_DEFAULT_TXD;
interface->rx_ring_count = FM10K_DEFAULT_RXD;
@@ -1821,6 +1843,7 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,
/* Start off interface as being down */
set_bit(__FM10K_DOWN, &interface->state);
+ set_bit(__FM10K_UPDATING_STATS, &interface->state);
return 0;
}
@@ -1940,10 +1963,7 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_dma;
}
- err = pci_request_selected_regions(pdev,
- pci_select_bars(pdev,
- IORESOURCE_MEM),
- fm10k_driver_name);
+ err = pci_request_mem_regions(pdev, fm10k_driver_name);
if (err) {
dev_err(&pdev->dev,
"pci_request_selected_regions failed: %d\n", err);
@@ -1987,6 +2007,12 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_sw_init;
+ /* the mbx interrupt might attempt to schedule the service task, so we
+ * must ensure it is disabled since we haven't yet requested the timer
+ * or work item.
+ */
+ set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
+
err = fm10k_mbx_request_irq(interface);
if (err)
goto err_mbx_interrupt;
@@ -2006,8 +2032,15 @@ static int fm10k_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* stop all the transmit queues from transmitting until link is up */
netif_tx_stop_all_queues(netdev);
- /* Register PTP interface */
- fm10k_ptp_register(interface);
+ /* Initialize service timer and service task late in order to avoid
+ * cleanup issues.
+ */
+ setup_timer(&interface->service_timer, &fm10k_service_timer,
+ (unsigned long)interface);
+ INIT_WORK(&interface->service_task, fm10k_service_task);
+
+ /* kick off service timer now, even when interface is down */
+ mod_timer(&interface->service_timer, (HZ * 2) + jiffies);
/* print warning for non-optimal configurations */
fm10k_slot_warn(interface);
@@ -2034,8 +2067,7 @@ err_sw_init:
err_ioremap:
free_netdev(netdev);
err_alloc_netdev:
- pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
pci_disable_device(pdev);
@@ -2065,9 +2097,6 @@ static void fm10k_remove(struct pci_dev *pdev)
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
- /* cleanup timestamp handling */
- fm10k_ptp_unregister(interface);
-
/* release VFs */
fm10k_iov_disable(pdev);
@@ -2086,14 +2115,55 @@ static void fm10k_remove(struct pci_dev *pdev)
free_netdev(netdev);
- pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_release_mem_regions(pdev);
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
}
+static void fm10k_prepare_suspend(struct fm10k_intfc *interface)
+{
+ /* the watchdog task reads from registers, which might appear like
+ * a surprise remove if the PCIe device is disabled while we're
+ * stopped. We stop the watchdog task until after we resume software
+ * activity.
+ */
+ set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
+ cancel_work_sync(&interface->service_task);
+
+ fm10k_prepare_for_reset(interface);
+}
+
+static int fm10k_handle_resume(struct fm10k_intfc *interface)
+{
+ struct fm10k_hw *hw = &interface->hw;
+ int err;
+
+ /* reset statistics starting values */
+ hw->mac.ops.rebind_hw_stats(hw, &interface->stats);
+
+ err = fm10k_handle_reset(interface);
+ if (err)
+ return err;
+
+ /* assume host is not ready, to prevent race with watchdog in case we
+ * actually don't have connection to the switch
+ */
+ interface->host_ready = false;
+ fm10k_watchdog_host_not_ready(interface);
+
+ /* force link to stay down for a second to prevent link flutter */
+ interface->link_down_event = jiffies + (HZ);
+ set_bit(__FM10K_LINK_DOWN, &interface->state);
+
+ /* clear the service task disable bit to allow service task to start */
+ clear_bit(__FM10K_SERVICE_DISABLE, &interface->state);
+ fm10k_service_event_schedule(interface);
+
+ return err;
+}
+
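fm10k_prepare_suspend() and fm10k_handle_resume() give suspend/resume, PCIe error recovery, and function-level reset one shared quiesce/restore pair. A hedged sketch of how the callers below compose them (the cycle itself is illustrative):

/* Illustrative composition; the suspend, io_resume and reset_notify
 * paths below all follow this shape.
 */
static int example_quiesce_restore(struct fm10k_intfc *interface)
{
	/* stop the service task and tear down interrupts/queues */
	fm10k_prepare_suspend(interface);

	/* ... device sleeps, is reset, or recovers here ... */

	/* reinitialize hardware and restart the service task */
	return fm10k_handle_resume(interface);
}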
#ifdef CONFIG_PM
/**
* fm10k_resume - Restore device to pre-sleep state
@@ -2130,63 +2200,13 @@ static int fm10k_resume(struct pci_dev *pdev)
/* refresh hw_addr in case it was dropped */
hw->hw_addr = interface->uc_addr;
- /* reset hardware to known state */
- err = hw->mac.ops.init_hw(&interface->hw);
- if (err) {
- dev_err(&pdev->dev, "init_hw failed: %d\n", err);
- return err;
- }
-
- /* reset statistics starting values */
- hw->mac.ops.rebind_hw_stats(hw, &interface->stats);
-
- /* reset clock */
- fm10k_ts_reset(interface);
-
- rtnl_lock();
-
- err = fm10k_init_queueing_scheme(interface);
- if (err)
- goto err_queueing_scheme;
-
- err = fm10k_mbx_request_irq(interface);
- if (err)
- goto err_mbx_irq;
-
- err = fm10k_hw_ready(interface);
+ err = fm10k_handle_resume(interface);
if (err)
- goto err_open;
-
- err = netif_running(netdev) ? fm10k_open(netdev) : 0;
- if (err)
- goto err_open;
-
- rtnl_unlock();
-
- /* assume host is not ready, to prevent race with watchdog in case we
- * actually don't have connection to the switch
- */
- interface->host_ready = false;
- fm10k_watchdog_host_not_ready(interface);
-
- /* clear the service task disable bit to allow service task to start */
- clear_bit(__FM10K_SERVICE_DISABLE, &interface->state);
- fm10k_service_event_schedule(interface);
-
- /* restore SR-IOV interface */
- fm10k_iov_resume(pdev);
+ return err;
netif_device_attach(netdev);
return 0;
-err_open:
- fm10k_mbx_free_irq(interface);
-err_mbx_irq:
- fm10k_clear_queueing_scheme(interface);
-err_queueing_scheme:
- rtnl_unlock();
-
- return err;
}
/**
@@ -2206,27 +2226,7 @@ static int fm10k_suspend(struct pci_dev *pdev,
netif_device_detach(netdev);
- fm10k_iov_suspend(pdev);
-
- /* the watchdog tasks may read registers, which will appear like a
- * surprise-remove event once the PCI device is disabled. This will
- * cause us to close the netdevice, so we don't retain the open/closed
- * state post-resume. Prevent this by disabling the service task while
- * suspended, until we actually resume.
- */
- set_bit(__FM10K_SERVICE_DISABLE, &interface->state);
- cancel_work_sync(&interface->service_task);
-
- rtnl_lock();
-
- if (netif_running(netdev))
- fm10k_close(netdev);
-
- fm10k_mbx_free_irq(interface);
-
- fm10k_clear_queueing_scheme(interface);
-
- rtnl_unlock();
+ fm10k_prepare_suspend(interface);
err = pci_save_state(pdev);
if (err)
@@ -2259,15 +2259,7 @@ static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev,
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
- if (netif_running(netdev))
- fm10k_close(netdev);
-
- /* free interrupts */
- fm10k_clear_queueing_scheme(interface);
-
- fm10k_mbx_free_irq(interface);
-
- pci_disable_device(pdev);
+ fm10k_prepare_suspend(interface);
/* Request a slot reset. */
return PCI_ERS_RESULT_NEED_RESET;
@@ -2281,7 +2273,6 @@ static pci_ers_result_t fm10k_io_error_detected(struct pci_dev *pdev,
*/
static pci_ers_result_t fm10k_io_slot_reset(struct pci_dev *pdev)
{
- struct fm10k_intfc *interface = pci_get_drvdata(pdev);
pci_ers_result_t result;
if (pci_enable_device_mem(pdev)) {
@@ -2299,12 +2290,6 @@ static pci_ers_result_t fm10k_io_slot_reset(struct pci_dev *pdev)
pci_wake_from_d3(pdev, false);
- /* refresh hw_addr in case it was dropped */
- interface->hw.hw_addr = interface->uc_addr;
-
- interface->flags |= FM10K_FLAG_RESET_REQUESTED;
- fm10k_service_event_schedule(interface);
-
result = PCI_ERS_RESULT_RECOVERED;
}
@@ -2324,46 +2309,54 @@ static void fm10k_io_resume(struct pci_dev *pdev)
{
struct fm10k_intfc *interface = pci_get_drvdata(pdev);
struct net_device *netdev = interface->netdev;
- struct fm10k_hw *hw = &interface->hw;
- int err = 0;
-
- /* reset hardware to known state */
- err = hw->mac.ops.init_hw(&interface->hw);
- if (err) {
- dev_err(&pdev->dev, "init_hw failed: %d\n", err);
- return;
- }
-
- /* reset statistics starting values */
- hw->mac.ops.rebind_hw_stats(hw, &interface->stats);
+ int err;
- err = fm10k_init_queueing_scheme(interface);
- if (err) {
- dev_err(&interface->pdev->dev,
- "init_queueing_scheme failed: %d\n", err);
- return;
- }
+ err = fm10k_handle_resume(interface);
- /* reassociate interrupts */
- fm10k_mbx_request_irq(interface);
+ if (err)
+ dev_warn(&pdev->dev,
+ "fm10k_io_resume failed: %d\n", err);
+ else
+ netif_device_attach(netdev);
+}
- /* reset clock */
- fm10k_ts_reset(interface);
+/**
+ * fm10k_io_reset_notify - called when PCI function is reset
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the PCI function is reset such as from
+ * /sys/class/net/<enpX>/device/reset or similar. When prepare is true, it
+ * means we should prepare for a function reset. If prepare is false, it means
+ * the function reset just occurred.
+ */
+static void fm10k_io_reset_notify(struct pci_dev *pdev, bool prepare)
+{
+ struct fm10k_intfc *interface = pci_get_drvdata(pdev);
+ int err = 0;
- if (netif_running(netdev))
- err = fm10k_open(netdev);
+ if (prepare) {
+ /* warn in case we have any active VF devices */
+ if (pci_num_vf(pdev))
+ dev_warn(&pdev->dev,
+ "PCIe FLR may cause issues for any active VF devices\n");
- /* final check of hardware state before registering the interface */
- err = err ? : fm10k_hw_ready(interface);
+ fm10k_prepare_suspend(interface);
+ } else {
+ err = fm10k_handle_resume(interface);
+ }
- if (!err)
- netif_device_attach(netdev);
+ if (err) {
+ dev_warn(&pdev->dev,
+ "fm10k_io_reset_notify failed: %d\n", err);
+ netif_device_detach(interface->netdev);
+ }
}
static const struct pci_error_handlers fm10k_err_handler = {
.error_detected = fm10k_io_error_detected,
.slot_reset = fm10k_io_slot_reset,
.resume = fm10k_io_resume,
+ .reset_notify = fm10k_io_reset_notify,
};
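The .reset_notify hook added above is invoked by the PCI core around function-level resets. Roughly how the core of this era dispatches it (a sketch, not the verbatim core code):

/* Approximate PCI core dispatch for .reset_notify (sketch). */
static void pci_reset_notify(struct pci_dev *dev, bool prepare)
{
	const struct pci_error_handlers *err_handler =
		dev->driver ? dev->driver->err_handler : NULL;

	if (err_handler && err_handler->reset_notify)
		err_handler->reset_notify(dev, prepare);
}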
static struct pci_driver fm10k_driver = {
@@ -2382,7 +2375,7 @@ static struct pci_driver fm10k_driver = {
/**
* fm10k_register_pci_driver - register driver interface
*
- * This funciton is called on module load in order to register the driver.
+ * This function is called on module load in order to register the driver.
**/
int fm10k_register_pci_driver(void)
{
@@ -2392,7 +2385,7 @@ int fm10k_register_pci_driver(void)
/**
* fm10k_unregister_pci_driver - unregister driver interface
*
- * This funciton is called on module unload in order to remove the driver.
+ * This function is called on module unload in order to remove the driver.
**/
void fm10k_unregister_pci_driver(void)
{
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index 8cf943db5662..682299dd0ce4 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -51,34 +51,37 @@ static s32 fm10k_reset_hw_pf(struct fm10k_hw *hw)
/* shut down all rings */
err = fm10k_disable_queues_generic(hw, FM10K_MAX_QUEUES);
- if (err)
+ if (err == FM10K_ERR_REQUESTS_PENDING) {
+ hw->mac.reset_while_pending++;
+ goto force_reset;
+ } else if (err) {
return err;
+ }
/* Verify that DMA is no longer active */
reg = fm10k_read_reg(hw, FM10K_DMA_CTRL);
if (reg & (FM10K_DMA_CTRL_TX_ACTIVE | FM10K_DMA_CTRL_RX_ACTIVE))
return FM10K_ERR_DMA_PENDING;
- /* verify the switch is ready for reset */
- reg = fm10k_read_reg(hw, FM10K_DMA_CTRL2);
- if (!(reg & FM10K_DMA_CTRL2_SWITCH_READY))
- goto out;
-
+force_reset:
/* Initiate data path reset */
- reg |= FM10K_DMA_CTRL_DATAPATH_RESET;
+ reg = FM10K_DMA_CTRL_DATAPATH_RESET;
fm10k_write_reg(hw, FM10K_DMA_CTRL, reg);
/* Flush write and allow 100us for reset to complete */
fm10k_write_flush(hw);
udelay(FM10K_RESET_TIMEOUT);
+ /* Reset mailbox global interrupts */
+ reg = FM10K_MBX_GLOBAL_REQ_INTERRUPT | FM10K_MBX_GLOBAL_ACK_INTERRUPT;
+ fm10k_write_reg(hw, FM10K_GMBX, reg);
+
/* Verify we made it out of reset */
reg = fm10k_read_reg(hw, FM10K_IP);
if (!(reg & FM10K_IP_NOTINRESET))
- err = FM10K_ERR_RESET_FAILED;
+ return FM10K_ERR_RESET_FAILED;
-out:
- return err;
+ return 0;
}
/**
@@ -219,8 +222,8 @@ static s32 fm10k_update_vlan_pf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set)
/* VLAN multi-bit write:
* The multi-bit write has several parts to it.
- * 3 2 1 0
- * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
+ * 24 16 8 0
+ * 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0 7 6 5 4 3 2 1 0
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | RSVD0 | Length |C|RSVD0| VLAN ID |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -488,6 +491,10 @@ static s32 fm10k_update_lport_state_pf(struct fm10k_hw *hw, u16 glort,
if (!fm10k_glort_valid_pf(hw, glort))
return FM10K_ERR_PARAM;
+ /* reset multicast mode if deleting lport */
+ if (!enable)
+ fm10k_update_xcast_mode_pf(hw, glort, FM10K_XCAST_MODE_NONE);
+
/* construct the lport message from the 2 pieces of data we have */
lport_msg = ((u32)count << 16) | glort;
@@ -527,8 +534,8 @@ static s32 fm10k_configure_dglort_map_pf(struct fm10k_hw *hw,
return FM10K_ERR_PARAM;
/* determine count of VSIs and queues */
- queue_count = 1 << (dglort->rss_l + dglort->pc_l);
- vsi_count = 1 << (dglort->vsi_l + dglort->queue_l);
+ queue_count = BIT(dglort->rss_l + dglort->pc_l);
+ vsi_count = BIT(dglort->vsi_l + dglort->queue_l);
glort = dglort->glort;
q_idx = dglort->queue_b;
@@ -544,8 +551,8 @@ static s32 fm10k_configure_dglort_map_pf(struct fm10k_hw *hw,
}
/* determine count of PCs and queues */
- queue_count = 1 << (dglort->queue_l + dglort->rss_l + dglort->vsi_l);
- pc_count = 1 << dglort->pc_l;
+ queue_count = BIT(dglort->queue_l + dglort->rss_l + dglort->vsi_l);
+ pc_count = BIT(dglort->pc_l);
/* configure PC for Tx queues */
for (pc = 0; pc < pc_count; pc++) {
@@ -711,8 +718,8 @@ static s32 fm10k_iov_assign_resources_pf(struct fm10k_hw *hw, u16 num_vfs,
FM10K_RXDCTL_WRITE_BACK_MIN_DELAY |
FM10K_RXDCTL_DROP_ON_EMPTY);
fm10k_write_reg(hw, FM10K_RXQCTL(vf_q_idx),
- FM10K_RXQCTL_VF |
- (i << FM10K_RXQCTL_VF_SHIFT));
+ (i << FM10K_RXQCTL_VF_SHIFT) |
+ FM10K_RXQCTL_VF);
/* map queue pair to VF */
fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), vf_q_idx);
@@ -864,9 +871,13 @@ static s32 fm10k_iov_assign_default_mac_vlan_pf(struct fm10k_hw *hw,
fm10k_write_reg(hw, FM10K_TQMAP(qmap_idx), 0);
fm10k_write_reg(hw, FM10K_TXDCTL(vf_q_idx), 0);
- /* determine correct default VLAN ID */
+ /* Determine correct default VLAN ID. The FM10K_VLAN_OVERRIDE bit is
+ * used here to indicate to the VF that it will not have privilege to
+ * write VLAN_TABLE. All policy is enforced on the PF but this allows
+ * the VF to correctly report errors to userspace requests.
+ */
if (vf_info->pf_vid)
- vf_vid = vf_info->pf_vid | FM10K_VLAN_CLEAR;
+ vf_vid = vf_info->pf_vid | FM10K_VLAN_OVERRIDE;
else
vf_vid = vf_info->sw_vid;
@@ -952,7 +963,7 @@ static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw,
return FM10K_ERR_PARAM;
/* clear event notification of VF FLR */
- fm10k_write_reg(hw, FM10K_PFVFLREC(vf_idx / 32), 1 << (vf_idx % 32));
+ fm10k_write_reg(hw, FM10K_PFVFLREC(vf_idx / 32), BIT(vf_idx % 32));
/* force timeout and then disconnect the mailbox */
vf_info->mbx.timeout = 0;
@@ -987,7 +998,7 @@ static s32 fm10k_iov_reset_resources_pf(struct fm10k_hw *hw,
txqctl = ((u32)vf_vid << FM10K_TXQCTL_VID_SHIFT) |
(vf_idx << FM10K_TXQCTL_TC_SHIFT) |
FM10K_TXQCTL_VF | vf_idx;
- rxqctl = FM10K_RXQCTL_VF | (vf_idx << FM10K_RXQCTL_VF_SHIFT);
+ rxqctl = (vf_idx << FM10K_RXQCTL_VF_SHIFT) | FM10K_RXQCTL_VF;
/* stop further DMA and reset queue ownership back to VF */
for (i = vf_q_idx; i < (queues_per_pool + vf_q_idx); i++) {
@@ -1140,19 +1151,6 @@ static void fm10k_iov_update_stats_pf(struct fm10k_hw *hw,
fm10k_update_hw_stats_q(hw, q, idx, qpp);
}
-static s32 fm10k_iov_report_timestamp_pf(struct fm10k_hw *hw,
- struct fm10k_vf_info *vf_info,
- u64 timestamp)
-{
- u32 msg[4];
-
- /* generate port state response to notify VF it is not ready */
- fm10k_tlv_msg_init(msg, FM10K_VF_MSG_ID_1588);
- fm10k_tlv_attr_put_u64(msg, FM10K_1588_MSG_TIMESTAMP, timestamp);
-
- return vf_info->mbx.ops.enqueue_tx(hw, &vf_info->mbx, msg);
-}
-
/**
* fm10k_iov_msg_msix_pf - Message handler for MSI-X request from VF
* @hw: Pointer to hardware structure
@@ -1384,7 +1382,7 @@ s32 fm10k_iov_msg_lport_state_pf(struct fm10k_hw *hw, u32 **results,
mode = fm10k_iov_supported_xcast_mode_pf(vf_info, mode);
/* if mode is not currently enabled, enable it */
- if (!(FM10K_VF_FLAG_ENABLED(vf_info) & (1 << mode)))
+ if (!(FM10K_VF_FLAG_ENABLED(vf_info) & BIT(mode)))
fm10k_update_xcast_mode_pf(hw, vf_info->glort, mode);
/* swap mode back to a bit flag */
@@ -1618,35 +1616,27 @@ static s32 fm10k_request_lport_map_pf(struct fm10k_hw *hw)
* @hw: pointer to hardware structure
* @switch_ready: pointer to boolean value that will record switch state
*
- * This funciton will check the DMA_CTRL2 register and mailbox in order
+ * This function will check the DMA_CTRL2 register and mailbox in order
* to determine if the switch is ready for the PF to begin requesting
* addresses and mapping traffic to the local interface.
**/
static s32 fm10k_get_host_state_pf(struct fm10k_hw *hw, bool *switch_ready)
{
- s32 ret_val = 0;
u32 dma_ctrl2;
/* verify the switch is ready for interaction */
dma_ctrl2 = fm10k_read_reg(hw, FM10K_DMA_CTRL2);
if (!(dma_ctrl2 & FM10K_DMA_CTRL2_SWITCH_READY))
- goto out;
+ return 0;
/* retrieve generic host state info */
- ret_val = fm10k_get_host_state_generic(hw, switch_ready);
- if (ret_val)
- goto out;
-
- /* interface cannot receive traffic without logical ports */
- if (hw->mac.dglort_map == FM10K_DGLORTMAP_NONE)
- ret_val = fm10k_request_lport_map_pf(hw);
-
-out:
- return ret_val;
+ return fm10k_get_host_state_generic(hw, switch_ready);
}
/* This structure defines the attributes to be parsed below */
const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[] = {
+ FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_ERR,
+ sizeof(struct fm10k_swapi_error)),
FM10K_TLV_ATTR_U32(FM10K_PF_ATTR_ID_LPORT_MAP),
FM10K_TLV_ATTR_LAST
};
@@ -1787,89 +1777,6 @@ s32 fm10k_msg_err_pf(struct fm10k_hw *hw, u32 **results,
return 0;
}
-const struct fm10k_tlv_attr fm10k_1588_timestamp_msg_attr[] = {
- FM10K_TLV_ATTR_LE_STRUCT(FM10K_PF_ATTR_ID_1588_TIMESTAMP,
- sizeof(struct fm10k_swapi_1588_timestamp)),
- FM10K_TLV_ATTR_LAST
-};
-
-/* currently there is no shared 1588 timestamp handler */
-
-/**
- * fm10k_adjust_systime_pf - Adjust systime frequency
- * @hw: pointer to hardware structure
- * @ppb: adjustment rate in parts per billion
- *
- * This function will adjust the SYSTIME_CFG register contained in BAR 4
- * if this function is supported for BAR 4 access. The adjustment amount
- * is based on the parts per billion value provided and adjusted to a
- * value based on parts per 2^48 clock cycles.
- *
- * If adjustment is not supported or the requested value is too large
- * we will return an error.
- **/
-static s32 fm10k_adjust_systime_pf(struct fm10k_hw *hw, s32 ppb)
-{
- u64 systime_adjust;
-
- /* if sw_addr is not set we don't have switch register access */
- if (!hw->sw_addr)
- return ppb ? FM10K_ERR_PARAM : 0;
-
- /* we must convert the value from parts per billion to parts per
- * 2^48 cycles. In addition I have opted to only use the 30 most
- * significant bits of the adjustment value as the 8 least
- * significant bits are located in another register and represent
- * a value significantly less than a part per billion, the result
- * of dropping the 8 least significant bits is that the adjustment
- * value is effectively multiplied by 2^8 when we write it.
- *
- * As a result of all this the math for this breaks down as follows:
- * ppb / 10^9 == adjust * 2^8 / 2^48
- * If we solve this for adjust, and simplify it comes out as:
- * ppb * 2^31 / 5^9 == adjust
- */
- systime_adjust = (ppb < 0) ? -ppb : ppb;
- systime_adjust <<= 31;
- do_div(systime_adjust, 1953125);
-
- /* verify the requested adjustment value is in range */
- if (systime_adjust > FM10K_SW_SYSTIME_ADJUST_MASK)
- return FM10K_ERR_PARAM;
-
- if (ppb > 0)
- systime_adjust |= FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE;
-
- fm10k_write_sw_reg(hw, FM10K_SW_SYSTIME_ADJUST, (u32)systime_adjust);
-
- return 0;
-}
-
-/**
- * fm10k_read_systime_pf - Reads value of systime registers
- * @hw: pointer to the hardware structure
- *
- * Function reads the content of 2 registers, combined to represent a 64 bit
- * value measured in nanosecods. In order to guarantee the value is accurate
- * we check the 32 most significant bits both before and after reading the
- * 32 least significant bits to verify they didn't change as we were reading
- * the registers.
- **/
-static u64 fm10k_read_systime_pf(struct fm10k_hw *hw)
-{
- u32 systime_l, systime_h, systime_tmp;
-
- systime_h = fm10k_read_reg(hw, FM10K_SYSTIME + 1);
-
- do {
- systime_tmp = systime_h;
- systime_l = fm10k_read_reg(hw, FM10K_SYSTIME);
- systime_h = fm10k_read_reg(hw, FM10K_SYSTIME + 1);
- } while (systime_tmp != systime_h);
-
- return ((u64)systime_h << 32) | systime_l;
-}
-
static const struct fm10k_msg_data fm10k_msg_data_pf[] = {
FM10K_PF_MSG_ERR_HANDLER(XCAST_MODES, fm10k_msg_err_pf),
FM10K_PF_MSG_ERR_HANDLER(UPDATE_MAC_FWD_RULE, fm10k_msg_err_pf),
@@ -1899,8 +1806,7 @@ static const struct fm10k_mac_ops mac_ops_pf = {
.set_dma_mask = fm10k_set_dma_mask_pf,
.get_fault = fm10k_get_fault_pf,
.get_host_state = fm10k_get_host_state_pf,
- .adjust_systime = fm10k_adjust_systime_pf,
- .read_systime = fm10k_read_systime_pf,
+ .request_lport_map = fm10k_request_lport_map_pf,
};
static const struct fm10k_iov_ops iov_ops_pf = {
@@ -1912,7 +1818,6 @@ static const struct fm10k_iov_ops iov_ops_pf = {
.set_lport = fm10k_iov_set_lport_pf,
.reset_lport = fm10k_iov_reset_lport_pf,
.update_stats = fm10k_iov_update_stats_pf,
- .report_timestamp = fm10k_iov_report_timestamp_pf,
};
static s32 fm10k_get_invariants_pf(struct fm10k_hw *hw)
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
index b2d96b45ca3c..3336d3c10760 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.h
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -42,8 +42,6 @@ enum fm10k_pf_tlv_msg_id_v1 {
FM10K_PF_MSG_ID_UPDATE_FLOW = 0x503,
FM10K_PF_MSG_ID_DELETE_FLOW = 0x504,
FM10K_PF_MSG_ID_SET_FLOW_STATE = 0x505,
- FM10K_PF_MSG_ID_GET_1588_INFO = 0x506,
- FM10K_PF_MSG_ID_1588_TIMESTAMP = 0x701,
};
enum fm10k_pf_tlv_attr_id_v1 {
@@ -61,7 +59,6 @@ enum fm10k_pf_tlv_attr_id_v1 {
FM10K_PF_ATTR_ID_DELETE_FLOW = 0x0B,
FM10K_PF_ATTR_ID_PORT = 0x0C,
FM10K_PF_ATTR_ID_UPDATE_PVID = 0x0D,
- FM10K_PF_ATTR_ID_1588_TIMESTAMP = 0x10,
};
#define FM10K_MSG_LPORT_MAP_GLORT_SHIFT 0
@@ -74,6 +71,8 @@ enum fm10k_pf_tlv_attr_id_v1 {
#define FM10K_MSG_UPDATE_PVID_PVID_SHIFT 16
#define FM10K_MSG_UPDATE_PVID_PVID_SIZE 16
+#define FM10K_MSG_ERR_PEP_NOT_SCHEDULED 280
+
/* The following data structures are overlaid directly onto TLV mailbox
* messages, and must not break 4 byte alignment. Ensure the structures line
* up correctly as per their TLV definition.
@@ -100,13 +99,6 @@ struct fm10k_swapi_error {
struct fm10k_global_table_data ffu;
} __aligned(4) __packed;
-struct fm10k_swapi_1588_timestamp {
- __le64 egress;
- __le64 ingress;
- __le16 dglort;
- __le16 sglort;
-} __aligned(4) __packed;
-
s32 fm10k_msg_lport_map_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
extern const struct fm10k_tlv_attr fm10k_lport_map_msg_attr[];
#define FM10K_PF_MSG_LPORT_MAP_HANDLER(func) \
@@ -122,11 +114,6 @@ extern const struct fm10k_tlv_attr fm10k_err_msg_attr[];
#define FM10K_PF_MSG_ERR_HANDLER(msg, func) \
FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_##msg, fm10k_err_msg_attr, func)
-extern const struct fm10k_tlv_attr fm10k_1588_timestamp_msg_attr[];
-#define FM10K_PF_MSG_1588_TIMESTAMP_HANDLER(func) \
- FM10K_MSG_HANDLER(FM10K_PF_MSG_ID_1588_TIMESTAMP, \
- fm10k_1588_timestamp_msg_attr, func)
-
s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *, u32 **, struct fm10k_mbx_info *);
s32 fm10k_iov_msg_mac_vlan_pf(struct fm10k_hw *, u32 **,
struct fm10k_mbx_info *);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c b/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
deleted file mode 100644
index b4945e8abe03..000000000000
--- a/drivers/net/ethernet/intel/fm10k/fm10k_ptp.c
+++ /dev/null
@@ -1,462 +0,0 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * The full GNU General Public License is included in this distribution in
- * the file called "COPYING".
- *
- * Contact Information:
- * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- */
-
-#include <linux/ptp_classify.h>
-#include <linux/ptp_clock_kernel.h>
-
-#include "fm10k.h"
-
-#define FM10K_TS_TX_TIMEOUT (HZ * 15)
-
-void fm10k_systime_to_hwtstamp(struct fm10k_intfc *interface,
- struct skb_shared_hwtstamps *hwtstamp,
- u64 systime)
-{
- unsigned long flags;
-
- read_lock_irqsave(&interface->systime_lock, flags);
- systime += interface->ptp_adjust;
- read_unlock_irqrestore(&interface->systime_lock, flags);
-
- hwtstamp->hwtstamp = ns_to_ktime(systime);
-}
-
-static struct sk_buff *fm10k_ts_tx_skb(struct fm10k_intfc *interface,
- __le16 dglort)
-{
- struct sk_buff_head *list = &interface->ts_tx_skb_queue;
- struct sk_buff *skb;
-
- skb_queue_walk(list, skb) {
- if (FM10K_CB(skb)->fi.w.dglort == dglort)
- return skb;
- }
-
- return NULL;
-}
-
-void fm10k_ts_tx_enqueue(struct fm10k_intfc *interface, struct sk_buff *skb)
-{
- struct sk_buff_head *list = &interface->ts_tx_skb_queue;
- struct sk_buff *clone;
- unsigned long flags;
-
- /* create clone for us to return on the Tx path */
- clone = skb_clone_sk(skb);
- if (!clone)
- return;
-
- FM10K_CB(clone)->ts_tx_timeout = jiffies + FM10K_TS_TX_TIMEOUT;
- spin_lock_irqsave(&list->lock, flags);
-
- /* attempt to locate any buffers with the same dglort,
- * if none are present then insert skb in tail of list
- */
- skb = fm10k_ts_tx_skb(interface, FM10K_CB(clone)->fi.w.dglort);
- if (!skb) {
- skb_shinfo(clone)->tx_flags |= SKBTX_IN_PROGRESS;
- __skb_queue_tail(list, clone);
- }
-
- spin_unlock_irqrestore(&list->lock, flags);
-
- /* if list is already has one then we just free the clone */
- if (skb)
- dev_kfree_skb(clone);
-}
-
-void fm10k_ts_tx_hwtstamp(struct fm10k_intfc *interface, __le16 dglort,
- u64 systime)
-{
- struct skb_shared_hwtstamps shhwtstamps;
- struct sk_buff_head *list = &interface->ts_tx_skb_queue;
- struct sk_buff *skb;
- unsigned long flags;
-
- spin_lock_irqsave(&list->lock, flags);
-
- /* attempt to locate and pull the sk_buff out of the list */
- skb = fm10k_ts_tx_skb(interface, dglort);
- if (skb)
- __skb_unlink(skb, list);
-
- spin_unlock_irqrestore(&list->lock, flags);
-
- /* if not found do nothing */
- if (!skb)
- return;
-
- /* timestamp the sk_buff and free out copy */
- fm10k_systime_to_hwtstamp(interface, &shhwtstamps, systime);
- skb_tstamp_tx(skb, &shhwtstamps);
- dev_kfree_skb_any(skb);
-}
-
-void fm10k_ts_tx_subtask(struct fm10k_intfc *interface)
-{
- struct sk_buff_head *list = &interface->ts_tx_skb_queue;
- struct sk_buff *skb, *tmp;
- unsigned long flags;
-
- /* If we're down or resetting, just bail */
- if (test_bit(__FM10K_DOWN, &interface->state) ||
- test_bit(__FM10K_RESETTING, &interface->state))
- return;
-
- spin_lock_irqsave(&list->lock, flags);
-
- /* walk though the list and flush any expired timestamp packets */
- skb_queue_walk_safe(list, skb, tmp) {
- if (!time_is_after_jiffies(FM10K_CB(skb)->ts_tx_timeout))
- continue;
- __skb_unlink(skb, list);
- kfree_skb(skb);
- interface->tx_hwtstamp_timeouts++;
- }
-
- spin_unlock_irqrestore(&list->lock, flags);
-}
-
-static u64 fm10k_systime_read(struct fm10k_intfc *interface)
-{
- struct fm10k_hw *hw = &interface->hw;
-
- return hw->mac.ops.read_systime(hw);
-}
-
-void fm10k_ts_reset(struct fm10k_intfc *interface)
-{
- s64 ns = ktime_to_ns(ktime_get_real());
- unsigned long flags;
-
- /* reinitialize the clock */
- write_lock_irqsave(&interface->systime_lock, flags);
- interface->ptp_adjust = fm10k_systime_read(interface) - ns;
- write_unlock_irqrestore(&interface->systime_lock, flags);
-}
-
-void fm10k_ts_init(struct fm10k_intfc *interface)
-{
- /* Initialize lock protecting systime access */
- rwlock_init(&interface->systime_lock);
-
- /* Initialize skb queue for pending timestamp requests */
- skb_queue_head_init(&interface->ts_tx_skb_queue);
-
- /* reset the clock to current kernel time */
- fm10k_ts_reset(interface);
-}
-
-/**
- * fm10k_get_ts_config - get current hardware timestamping configuration
- * @netdev: network interface device structure
- * @ifreq: ioctl data
- *
- * This function returns the current timestamping settings. Rather than
- * attempt to deconstruct registers to fill in the values, simply keep a copy
- * of the old settings around, and return a copy when requested.
- */
-int fm10k_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
-{
- struct fm10k_intfc *interface = netdev_priv(netdev);
- struct hwtstamp_config *config = &interface->ts_config;
-
- return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
- -EFAULT : 0;
-}
-
-/**
- * fm10k_set_ts_config - control hardware time stamping
- * @netdev: network interface device structure
- * @ifreq: ioctl data
- *
- * Outgoing time stamping can be enabled and disabled. Play nice and
- * disable it when requested, although it shouldn't cause any overhead
- * when no packet needs it. At most one packet in the queue may be
- * marked for time stamping, otherwise it would be impossible to tell
- * for sure to which packet the hardware time stamp belongs.
- *
- * Incoming time stamping has to be configured via the hardware
- * filters. Not all combinations are supported, in particular event
- * type has to be specified. Matching the kind of event packet is
- * not supported, with the exception of "all V2 events regardless of
- * level 2 or 4".
- *
- * Since hardware always timestamps Path delay packets when timestamping V2
- * packets, regardless of the type specified in the register, only use V2
- * Event mode. This more accurately tells the user what the hardware is going
- * to do anyways.
- */
-int fm10k_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
-{
- struct fm10k_intfc *interface = netdev_priv(netdev);
- struct hwtstamp_config ts_config;
-
- if (copy_from_user(&ts_config, ifr->ifr_data, sizeof(ts_config)))
- return -EFAULT;
-
- /* reserved for future extensions */
- if (ts_config.flags)
- return -EINVAL;
-
- switch (ts_config.tx_type) {
- case HWTSTAMP_TX_OFF:
- break;
- case HWTSTAMP_TX_ON:
- /* we likely need some check here to see if this is supported */
- break;
- default:
- return -ERANGE;
- }
-
- switch (ts_config.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- interface->flags &= ~FM10K_FLAG_RX_TS_ENABLED;
- break;
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- case HWTSTAMP_FILTER_ALL:
- interface->flags |= FM10K_FLAG_RX_TS_ENABLED;
- ts_config.rx_filter = HWTSTAMP_FILTER_ALL;
- break;
- default:
- return -ERANGE;
- }
-
- /* save these settings for future reference */
- interface->ts_config = ts_config;
-
- return copy_to_user(ifr->ifr_data, &ts_config, sizeof(ts_config)) ?
- -EFAULT : 0;
-}
-
-static int fm10k_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
-{
- struct fm10k_intfc *interface;
- struct fm10k_hw *hw;
- int err;
-
- interface = container_of(ptp, struct fm10k_intfc, ptp_caps);
- hw = &interface->hw;
-
- err = hw->mac.ops.adjust_systime(hw, ppb);
-
- /* the only error we should see is if the value is out of range */
- return (err == FM10K_ERR_PARAM) ? -ERANGE : err;
-}
-
-static int fm10k_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
-{
- struct fm10k_intfc *interface;
- unsigned long flags;
-
- interface = container_of(ptp, struct fm10k_intfc, ptp_caps);
-
- write_lock_irqsave(&interface->systime_lock, flags);
- interface->ptp_adjust += delta;
- write_unlock_irqrestore(&interface->systime_lock, flags);
-
- return 0;
-}
-
-static int fm10k_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
-{
- struct fm10k_intfc *interface;
- unsigned long flags;
- u64 now;
-
- interface = container_of(ptp, struct fm10k_intfc, ptp_caps);
-
- read_lock_irqsave(&interface->systime_lock, flags);
- now = fm10k_systime_read(interface) + interface->ptp_adjust;
- read_unlock_irqrestore(&interface->systime_lock, flags);
-
- *ts = ns_to_timespec64(now);
-
- return 0;
-}
-
-static int fm10k_ptp_settime(struct ptp_clock_info *ptp,
- const struct timespec64 *ts)
-{
- struct fm10k_intfc *interface;
- unsigned long flags;
- u64 ns = timespec64_to_ns(ts);
-
- interface = container_of(ptp, struct fm10k_intfc, ptp_caps);
-
- write_lock_irqsave(&interface->systime_lock, flags);
- interface->ptp_adjust = fm10k_systime_read(interface) - ns;
- write_unlock_irqrestore(&interface->systime_lock, flags);
-
- return 0;
-}
-
-static int fm10k_ptp_enable(struct ptp_clock_info *ptp,
- struct ptp_clock_request *rq,
- int __always_unused on)
-{
- struct ptp_clock_time *t = &rq->perout.period;
- struct fm10k_intfc *interface;
- struct fm10k_hw *hw;
- u64 period;
- u32 step;
-
- /* we can only support periodic output */
- if (rq->type != PTP_CLK_REQ_PEROUT)
- return -EINVAL;
-
- /* verify the requested channel is there */
- if (rq->perout.index >= ptp->n_per_out)
- return -EINVAL;
-
- /* we cannot enforce start time as there is no
- * mechanism for that in the hardware, we can only control
- * the period.
- */
-
- /* we cannot support periods greater than 4 seconds due to reg limit */
- if (t->sec > 4 || t->sec < 0)
- return -ERANGE;
-
- interface = container_of(ptp, struct fm10k_intfc, ptp_caps);
- hw = &interface->hw;
-
- /* we simply cannot support the operation if we don't have BAR4 */
- if (!hw->sw_addr)
- return -ENOTSUPP;
-
- /* convert to unsigned 64b ns, verify we can put it in a 32b register */
- period = t->sec * 1000000000LL + t->nsec;
-
- /* determine the minimum size for period */
- step = 2 * (fm10k_read_reg(hw, FM10K_SYSTIME_CFG) &
- FM10K_SYSTIME_CFG_STEP_MASK);
-
- /* verify the value is in range supported by hardware */
- if ((period && (period < step)) || (period > U32_MAX))
- return -ERANGE;
-
- /* notify hardware of request to being sending pulses */
- fm10k_write_sw_reg(hw, FM10K_SW_SYSTIME_PULSE(rq->perout.index),
- (u32)period);
-
- return 0;
-}
-
-static struct ptp_pin_desc fm10k_ptp_pd[2] = {
- {
- .name = "IEEE1588_PULSE0",
- .index = 0,
- .func = PTP_PF_PEROUT,
- .chan = 0
- },
- {
- .name = "IEEE1588_PULSE1",
- .index = 1,
- .func = PTP_PF_PEROUT,
- .chan = 1
- }
-};
-
-static int fm10k_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
- enum ptp_pin_function func, unsigned int chan)
-{
- /* verify the requested pin is there */
- if (pin >= ptp->n_pins || !ptp->pin_config)
- return -EINVAL;
-
- /* enforce locked channels, no changing them */
- if (chan != ptp->pin_config[pin].chan)
- return -EINVAL;
-
- /* we want to keep the functions locked as well */
- if (func != ptp->pin_config[pin].func)
- return -EINVAL;
-
- return 0;
-}
-
-void fm10k_ptp_register(struct fm10k_intfc *interface)
-{
- struct ptp_clock_info *ptp_caps = &interface->ptp_caps;
- struct device *dev = &interface->pdev->dev;
- struct ptp_clock *ptp_clock;
-
- snprintf(ptp_caps->name, sizeof(ptp_caps->name),
- "%s", interface->netdev->name);
- ptp_caps->owner = THIS_MODULE;
- /* This math is simply the inverse of the math in
- * fm10k_adjust_systime_pf applied to an adjustment value
- * of 2^30 - 1 which is the maximum value of the register:
- * max_ppb == ((2^30 - 1) * 5^9) / 2^31
- */
- ptp_caps->max_adj = 976562;
- ptp_caps->adjfreq = fm10k_ptp_adjfreq;
- ptp_caps->adjtime = fm10k_ptp_adjtime;
- ptp_caps->gettime64 = fm10k_ptp_gettime;
- ptp_caps->settime64 = fm10k_ptp_settime;
-
- /* provide pins if BAR4 is accessible */
- if (interface->sw_addr) {
- /* enable periodic outputs */
- ptp_caps->n_per_out = 2;
- ptp_caps->enable = fm10k_ptp_enable;
-
- /* enable clock pins */
- ptp_caps->verify = fm10k_ptp_verify;
- ptp_caps->n_pins = 2;
- ptp_caps->pin_config = fm10k_ptp_pd;
- }
-
- ptp_clock = ptp_clock_register(ptp_caps, dev);
- if (IS_ERR(ptp_clock)) {
- ptp_clock = NULL;
- dev_err(dev, "ptp_clock_register failed\n");
- } else {
- dev_info(dev, "registered PHC device %s\n", ptp_caps->name);
- }
-
- interface->ptp_clock = ptp_clock;
-}
-
-void fm10k_ptp_unregister(struct fm10k_intfc *interface)
-{
- struct ptp_clock *ptp_clock = interface->ptp_clock;
- struct device *dev = &interface->pdev->dev;
-
- if (!ptp_clock)
- return;
-
- interface->ptp_clock = NULL;
-
- ptp_clock_unregister(ptp_clock);
- dev_info(dev, "removed PHC %s\n", interface->ptp_caps.name);
-}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
index ab01bb30752f..f8e87bf086b9 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -222,7 +222,7 @@ s32 fm10k_tlv_attr_put_value(u32 *msg, u16 attr_id, s64 value, u32 len)
attr = &msg[FM10K_TLV_DWORD_LEN(*msg)];
if (len < 4) {
- attr[1] = (u32)value & ((0x1ul << (8 * len)) - 1);
+ attr[1] = (u32)value & (BIT(8 * len) - 1);
} else {
attr[1] = (u32)value;
if (len > 4)
@@ -481,7 +481,8 @@ static s32 fm10k_tlv_attr_validate(u32 *attr,
* up into an array of pointers stored in results. The function will
* return FM10K_ERR_PARAM on any input or message error,
* FM10K_NOT_IMPLEMENTED for any attribute that is outside of the array
- * and 0 on success.
+ * and 0 on success. Any attributes not found in tlv_attr will be silently
+ * ignored.
**/
static s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results,
const struct fm10k_tlv_attr *tlv_attr)
@@ -518,14 +519,15 @@ static s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results,
while (offset < len) {
attr_id = *attr & FM10K_TLV_ID_MASK;
- if (attr_id < FM10K_TLV_RESULTS_MAX)
- err = fm10k_tlv_attr_validate(attr, tlv_attr);
- else
- err = FM10K_NOT_IMPLEMENTED;
+ if (attr_id >= FM10K_TLV_RESULTS_MAX)
+ return FM10K_NOT_IMPLEMENTED;
- if (err < 0)
+ err = fm10k_tlv_attr_validate(attr, tlv_attr);
+ if (err == FM10K_NOT_IMPLEMENTED)
+ ; /* silently ignore non-implemented attributes */
+ else if (err)
return err;
- if (!err)
+ else
results[attr_id] = attr;
/* update offset */
@@ -652,29 +654,29 @@ const struct fm10k_tlv_attr fm10k_tlv_msg_test_attr[] = {
**/
static void fm10k_tlv_msg_test_generate_data(u32 *msg, u32 attr_flags)
{
- if (attr_flags & (1 << FM10K_TEST_MSG_STRING))
+ if (attr_flags & BIT(FM10K_TEST_MSG_STRING))
fm10k_tlv_attr_put_null_string(msg, FM10K_TEST_MSG_STRING,
test_str);
- if (attr_flags & (1 << FM10K_TEST_MSG_MAC_ADDR))
+ if (attr_flags & BIT(FM10K_TEST_MSG_MAC_ADDR))
fm10k_tlv_attr_put_mac_vlan(msg, FM10K_TEST_MSG_MAC_ADDR,
test_mac, test_vlan);
- if (attr_flags & (1 << FM10K_TEST_MSG_U8))
+ if (attr_flags & BIT(FM10K_TEST_MSG_U8))
fm10k_tlv_attr_put_u8(msg, FM10K_TEST_MSG_U8, test_u8);
- if (attr_flags & (1 << FM10K_TEST_MSG_U16))
+ if (attr_flags & BIT(FM10K_TEST_MSG_U16))
fm10k_tlv_attr_put_u16(msg, FM10K_TEST_MSG_U16, test_u16);
- if (attr_flags & (1 << FM10K_TEST_MSG_U32))
+ if (attr_flags & BIT(FM10K_TEST_MSG_U32))
fm10k_tlv_attr_put_u32(msg, FM10K_TEST_MSG_U32, test_u32);
- if (attr_flags & (1 << FM10K_TEST_MSG_U64))
+ if (attr_flags & BIT(FM10K_TEST_MSG_U64))
fm10k_tlv_attr_put_u64(msg, FM10K_TEST_MSG_U64, test_u64);
- if (attr_flags & (1 << FM10K_TEST_MSG_S8))
+ if (attr_flags & BIT(FM10K_TEST_MSG_S8))
fm10k_tlv_attr_put_s8(msg, FM10K_TEST_MSG_S8, test_s8);
- if (attr_flags & (1 << FM10K_TEST_MSG_S16))
+ if (attr_flags & BIT(FM10K_TEST_MSG_S16))
fm10k_tlv_attr_put_s16(msg, FM10K_TEST_MSG_S16, test_s16);
- if (attr_flags & (1 << FM10K_TEST_MSG_S32))
+ if (attr_flags & BIT(FM10K_TEST_MSG_S32))
fm10k_tlv_attr_put_s32(msg, FM10K_TEST_MSG_S32, test_s32);
- if (attr_flags & (1 << FM10K_TEST_MSG_S64))
+ if (attr_flags & BIT(FM10K_TEST_MSG_S64))
fm10k_tlv_attr_put_s64(msg, FM10K_TEST_MSG_S64, test_s64);
- if (attr_flags & (1 << FM10K_TEST_MSG_LE_STRUCT))
+ if (attr_flags & BIT(FM10K_TEST_MSG_LE_STRUCT))
fm10k_tlv_attr_put_le_struct(msg, FM10K_TEST_MSG_LE_STRUCT,
test_le, 8);
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
index e1845e0a17d8..a1f1027fe184 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.h
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_type.h b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
index 854ebb1906bf..f4e75c498287 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_type.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_type.h
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -225,11 +225,6 @@ struct fm10k_hw;
#define FM10K_STATS_LOOPBACK_DROP 0x3806
#define FM10K_STATS_NODESC_DROP 0x3807
-/* Timesync registers */
-#define FM10K_SYSTIME 0x3814
-#define FM10K_SYSTIME_CFG 0x3818
-#define FM10K_SYSTIME_CFG_STEP_MASK 0x0000000F
-
/* PCIe state registers */
#define FM10K_PHYADDR 0x381C
@@ -355,6 +350,7 @@ struct fm10k_hw;
#define FM10K_VLAN_TABLE_VSI_MAX 64
#define FM10K_VLAN_LENGTH_SHIFT 16
#define FM10K_VLAN_CLEAR BIT(15)
+#define FM10K_VLAN_OVERRIDE FM10K_VLAN_CLEAR
#define FM10K_VLAN_ALL \
((FM10K_VLAN_TABLE_VID_MAX - 1) << FM10K_VLAN_LENGTH_SHIFT)
@@ -381,12 +377,6 @@ struct fm10k_hw;
#define FM10K_VFSYSTIME 0x00040
#define FM10K_VFITR(_n) ((_n) + 0x00060)
-/* Registers contained in BAR 4 for Switch management */
-#define FM10K_SW_SYSTIME_ADJUST 0x0224D
-#define FM10K_SW_SYSTIME_ADJUST_MASK 0x3FFFFFFF
-#define FM10K_SW_SYSTIME_ADJUST_DIR_POSITIVE 0x80000000
-#define FM10K_SW_SYSTIME_PULSE(_n) ((_n) + 0x02252)
-
enum fm10k_int_source {
fm10k_int_mailbox = 0,
fm10k_int_pcie_fault = 1,
@@ -536,6 +526,7 @@ struct fm10k_mac_ops {
s32 (*stop_hw)(struct fm10k_hw *);
s32 (*get_bus_info)(struct fm10k_hw *);
s32 (*get_host_state)(struct fm10k_hw *, bool *);
+ s32 (*request_lport_map)(struct fm10k_hw *);
s32 (*update_vlan)(struct fm10k_hw *, u32, u8, bool);
s32 (*read_mac_addr)(struct fm10k_hw *);
s32 (*update_uc_addr)(struct fm10k_hw *, u16, const u8 *,
@@ -550,8 +541,6 @@ struct fm10k_mac_ops {
struct fm10k_dglort_cfg *);
void (*set_dma_mask)(struct fm10k_hw *, u64);
s32 (*get_fault)(struct fm10k_hw *, int, struct fm10k_fault *);
- s32 (*adjust_systime)(struct fm10k_hw *, s32 ppb);
- u64 (*read_systime)(struct fm10k_hw *);
};
enum fm10k_mac_type {
@@ -574,6 +563,7 @@ struct fm10k_mac_info {
bool tx_ready;
u32 dglort_map;
u8 itr_scale;
+ u64 reset_while_pending;
};
struct fm10k_swapi_table_info {
@@ -617,10 +607,10 @@ struct fm10k_vf_info {
*/
};
-#define FM10K_VF_FLAG_ALLMULTI_CAPABLE ((u8)1 << FM10K_XCAST_MODE_ALLMULTI)
-#define FM10K_VF_FLAG_MULTI_CAPABLE ((u8)1 << FM10K_XCAST_MODE_MULTI)
-#define FM10K_VF_FLAG_PROMISC_CAPABLE ((u8)1 << FM10K_XCAST_MODE_PROMISC)
-#define FM10K_VF_FLAG_NONE_CAPABLE ((u8)1 << FM10K_XCAST_MODE_NONE)
+#define FM10K_VF_FLAG_ALLMULTI_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_ALLMULTI))
+#define FM10K_VF_FLAG_MULTI_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_MULTI))
+#define FM10K_VF_FLAG_PROMISC_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_PROMISC))
+#define FM10K_VF_FLAG_NONE_CAPABLE (u8)(BIT(FM10K_XCAST_MODE_NONE))
#define FM10K_VF_FLAG_CAPABLE(vf_info) ((vf_info)->vf_flags & (u8)0xF)
#define FM10K_VF_FLAG_ENABLED(vf_info) ((vf_info)->vf_flags >> 4)
#define FM10K_VF_FLAG_SET_MODE(mode) ((u8)0x10 << (mode))
@@ -643,7 +633,6 @@ struct fm10k_iov_ops {
s32 (*set_lport)(struct fm10k_hw *, struct fm10k_vf_info *, u16, u8);
void (*reset_lport)(struct fm10k_hw *, struct fm10k_vf_info *);
void (*update_stats)(struct fm10k_hw *, struct fm10k_hw_stats_q *, u16);
- s32 (*report_timestamp)(struct fm10k_hw *, struct fm10k_vf_info *, u64);
};
struct fm10k_iov_info {
@@ -667,7 +656,6 @@ struct fm10k_info {
struct fm10k_hw {
u32 __iomem *hw_addr;
- u32 __iomem *sw_addr;
void *back;
struct fm10k_mac_info mac;
struct fm10k_bus_info bus;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
index 91f8d7311f3b..337ba65a9411 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.c
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -34,7 +34,7 @@ static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw)
/* we need to disable the queues before taking further steps */
err = fm10k_stop_hw_generic(hw);
- if (err)
+ if (err && err != FM10K_ERR_REQUESTS_PENDING)
return err;
/* If permanent address is set then we need to restore it */
@@ -67,7 +67,7 @@ static s32 fm10k_stop_hw_vf(struct fm10k_hw *hw)
fm10k_write_reg(hw, FM10K_TDLEN(i), tdlen);
}
- return 0;
+ return err;
}
/**
@@ -83,7 +83,9 @@ static s32 fm10k_reset_hw_vf(struct fm10k_hw *hw)
/* shut down queues we own and reset DMA configuration */
err = fm10k_stop_hw_vf(hw);
- if (err)
+ if (err == FM10K_ERR_REQUESTS_PENDING)
+ hw->mac.reset_while_pending++;
+ else if (err)
return err;
/* Inititate VF reset */
@@ -96,9 +98,9 @@ static s32 fm10k_reset_hw_vf(struct fm10k_hw *hw)
/* Clear reset bit and verify it was cleared */
fm10k_write_reg(hw, FM10K_VFCTRL, 0);
if (fm10k_read_reg(hw, FM10K_VFCTRL) & FM10K_VFCTRL_RST)
- err = FM10K_ERR_RESET_FAILED;
+ return FM10K_ERR_RESET_FAILED;
- return err;
+ return 0;
}
/**
@@ -188,7 +190,7 @@ static s32 fm10k_update_vlan_vf(struct fm10k_hw *hw, u32 vid, u8 vsi, bool set)
if (vsi)
return FM10K_ERR_PARAM;
- /* verify upper 4 bits of vid and length are 0 */
+ /* clever trick to verify reserved bits in both vid and length */
if ((vid << 16 | vid) >> 28)
return FM10K_ERR_PARAM;
@@ -228,7 +230,7 @@ s32 fm10k_msg_mac_vlan_vf(struct fm10k_hw *hw, u32 **results,
ether_addr_copy(hw->mac.perm_addr, perm_addr);
hw->mac.default_vid = vid & (FM10K_VLAN_TABLE_VID_MAX - 1);
- hw->mac.vlan_override = !!(vid & FM10K_VLAN_CLEAR);
+ hw->mac.vlan_override = !!(vid & FM10K_VLAN_OVERRIDE);
return 0;
}
@@ -451,13 +453,6 @@ static s32 fm10k_update_xcast_mode_vf(struct fm10k_hw *hw, u16 glort, u8 mode)
return mbx->ops.enqueue_tx(hw, mbx, msg);
}
-const struct fm10k_tlv_attr fm10k_1588_msg_attr[] = {
- FM10K_TLV_ATTR_U64(FM10K_1588_MSG_TIMESTAMP),
- FM10K_TLV_ATTR_LAST
-};
-
-/* currently there is no shared 1588 timestamp handler */
-
/**
* fm10k_update_hw_stats_vf - Updates hardware related statistics of VF
* @hw: pointer to hardware structure
@@ -509,52 +504,6 @@ static s32 fm10k_configure_dglort_map_vf(struct fm10k_hw *hw,
return 0;
}
-/**
- * fm10k_adjust_systime_vf - Adjust systime frequency
- * @hw: pointer to hardware structure
- * @ppb: adjustment rate in parts per billion
- *
- * This function takes an adjustment rate in parts per billion and will
- * verify that this value is 0 as the VF cannot support adjusting the
- * systime clock.
- *
- * If the ppb value is non-zero the return is ERR_PARAM else success
- **/
-static s32 fm10k_adjust_systime_vf(struct fm10k_hw *hw, s32 ppb)
-{
- /* The VF cannot adjust the clock frequency, however it should
- * already have a syntonic clock with whichever host interface is
- * running as the master for the host interface clock domain so
- * there should be not frequency adjustment necessary.
- */
- return ppb ? FM10K_ERR_PARAM : 0;
-}
-
-/**
- * fm10k_read_systime_vf - Reads value of systime registers
- * @hw: pointer to the hardware structure
- *
- * Function reads the content of 2 registers, combined to represent a 64 bit
- * value measured in nanoseconds. In order to guarantee the value is accurate
- * we check the 32 most significant bits both before and after reading the
- * 32 least significant bits to verify they didn't change as we were reading
- * the registers.
- **/
-static u64 fm10k_read_systime_vf(struct fm10k_hw *hw)
-{
- u32 systime_l, systime_h, systime_tmp;
-
- systime_h = fm10k_read_reg(hw, FM10K_VFSYSTIME + 1);
-
- do {
- systime_tmp = systime_h;
- systime_l = fm10k_read_reg(hw, FM10K_VFSYSTIME);
- systime_h = fm10k_read_reg(hw, FM10K_VFSYSTIME + 1);
- } while (systime_tmp != systime_h);
-
- return ((u64)systime_h << 32) | systime_l;
-}
-
static const struct fm10k_msg_data fm10k_msg_data_vf[] = {
FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
@@ -579,8 +528,6 @@ static const struct fm10k_mac_ops mac_ops_vf = {
.rebind_hw_stats = fm10k_rebind_hw_stats_vf,
.configure_dglort_map = fm10k_configure_dglort_map_vf,
.get_host_state = fm10k_get_host_state_generic,
- .adjust_systime = fm10k_adjust_systime_vf,
- .read_systime = fm10k_read_systime_vf,
};
static s32 fm10k_get_invariants_vf(struct fm10k_hw *hw)
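
The fm10k_read_systime_vf() function deleted above used the classic high-low-high sequence to assemble a 64-bit counter from two 32-bit registers without tearing. A standalone sketch of that technique; read_reg32() and the simulated counter are stand-ins for fm10k_read_reg() and the hardware, not driver code:

/* Sketch of the high-low-high read pattern from the removed
 * fm10k_read_systime_vf(): re-read the high word until it is stable
 * so a low-word wrap between reads cannot produce a torn value.
 */
#include <stdint.h>
#include <stdio.h>

/* hypothetical register model: a free-running counter that ticks
 * between accesses, about to wrap its low 32 bits */
static uint64_t fake_counter = 0xFFFFFFFEULL;

static uint32_t read_reg32(int hi)
{
	fake_counter++;
	return hi ? (uint32_t)(fake_counter >> 32)
		  : (uint32_t)fake_counter;
}

static uint64_t read_systime(void)
{
	uint32_t lo, hi, hi_prev;

	hi = read_reg32(1);
	do {
		hi_prev = hi;
		lo = read_reg32(0);
		hi = read_reg32(1);
	} while (hi != hi_prev);	/* retry if LO wrapped mid-read */

	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	printf("systime=%#llx\n", (unsigned long long)read_systime());
	return 0;
}
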
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_vf.h b/drivers/net/ethernet/intel/fm10k/fm10k_vf.h
index c4439f1313a0..2662f33c0c71 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_vf.h
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_vf.h
@@ -1,5 +1,5 @@
-/* Intel Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+/* Intel(R) Ethernet Switch Host Interface Driver
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -29,7 +29,6 @@ enum fm10k_vf_tlv_msg_id {
FM10K_VF_MSG_ID_MSIX,
FM10K_VF_MSG_ID_MAC_VLAN,
FM10K_VF_MSG_ID_LPORT_STATE,
- FM10K_VF_MSG_ID_1588,
FM10K_VF_MSG_ID_MAX,
};
@@ -49,11 +48,6 @@ enum fm10k_tlv_lport_state_attr_id {
FM10K_LPORT_STATE_MSG_MAX
};
-enum fm10k_tlv_1588_attr_id {
- FM10K_1588_MSG_TIMESTAMP,
- FM10K_1588_MSG_MAX
-};
-
#define FM10K_VF_MSG_MSIX_HANDLER(func) \
FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_MSIX, NULL, func)
@@ -70,9 +64,5 @@ extern const struct fm10k_tlv_attr fm10k_lport_state_msg_attr[];
FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_LPORT_STATE, \
fm10k_lport_state_msg_attr, func)
-extern const struct fm10k_tlv_attr fm10k_1588_msg_attr[];
-#define FM10K_VF_MSG_1588_HANDLER(func) \
- FM10K_MSG_HANDLER(FM10K_VF_MSG_ID_1588, fm10k_1588_msg_attr, func)
-
extern const struct fm10k_info fm10k_vf_info;
#endif /* _FM10K_VF_H */
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 1ce6e9c0427d..2a882916b4f6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -97,12 +97,12 @@
#define I40E_INT_NAME_STR_LEN (IFNAMSIZ + 16)
/* Ethtool Private Flags */
-#define I40E_PRIV_FLAGS_NPAR_FLAG BIT(0)
-#define I40E_PRIV_FLAGS_LINKPOLL_FLAG BIT(1)
-#define I40E_PRIV_FLAGS_FD_ATR BIT(2)
-#define I40E_PRIV_FLAGS_VEB_STATS BIT(3)
-#define I40E_PRIV_FLAGS_PS BIT(4)
-#define I40E_PRIV_FLAGS_HW_ATR_EVICT BIT(5)
+#define I40E_PRIV_FLAGS_MFP_FLAG BIT(0)
+#define I40E_PRIV_FLAGS_LINKPOLL_FLAG BIT(1)
+#define I40E_PRIV_FLAGS_FD_ATR BIT(2)
+#define I40E_PRIV_FLAGS_VEB_STATS BIT(3)
+#define I40E_PRIV_FLAGS_HW_ATR_EVICT BIT(4)
+#define I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT BIT(5)
#define I40E_NVM_VERSION_LO_SHIFT 0
#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
@@ -112,7 +112,9 @@
#define I40E_OEM_VER_PATCH_MASK 0xff
#define I40E_OEM_VER_BUILD_SHIFT 8
#define I40E_OEM_VER_SHIFT 24
-#define I40E_PHY_DEBUG_PORT BIT(4)
+#define I40E_PHY_DEBUG_ALL \
+ (I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW | \
+ I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW)
/* The values in here are decimal coded as hex as is the case in the NVM map*/
#define I40E_CURRENT_NVM_VERSION_HI 0x2
@@ -123,10 +125,7 @@
#define XSTRINGIFY(bar) STRINGIFY(bar)
#define I40E_RX_DESC(R, i) \
- ((ring_is_16byte_desc_enabled(R)) \
- ? (union i40e_32byte_rx_desc *) \
- (&(((union i40e_16byte_rx_desc *)((R)->desc))[i])) \
- : (&(((union i40e_32byte_rx_desc *)((R)->desc))[i])))
+ (&(((union i40e_32byte_rx_desc *)((R)->desc))[i]))
#define I40E_TX_DESC(R, i) \
(&(((struct i40e_tx_desc *)((R)->desc))[i]))
#define I40E_TX_CTXTDESC(R, i) \
@@ -202,6 +201,7 @@ struct i40e_lump_tracking {
#define I40E_HKEY_ARRAY_SIZE ((I40E_PFQF_HKEY_MAX_INDEX + 1) * 4)
#define I40E_HLUT_ARRAY_SIZE ((I40E_PFQF_HLUT_MAX_INDEX + 1) * 4)
+#define I40E_VF_HLUT_ARRAY_SIZE ((I40E_VFQF_HLUT1_MAX_INDEX + 1) * 4)
enum i40e_fd_stat_idx {
I40E_FD_STAT_ATR,
@@ -244,7 +244,6 @@ struct i40e_fdir_filter {
#define I40E_DCB_PRIO_TYPE_STRICT 0
#define I40E_DCB_PRIO_TYPE_ETS 1
#define I40E_DCB_STRICT_PRIO_CREDITS 127
-#define I40E_MAX_USER_PRIORITY 8
/* DCB per TC information data structure */
struct i40e_tc_info {
u16 qoffset; /* Queue offset from base queue */
@@ -284,6 +283,7 @@ struct i40e_pf {
#endif /* I40E_FCOE */
u16 num_lan_qps; /* num lan queues this PF has set up */
u16 num_lan_msix; /* num queue vectors for the base PF vsi */
+ u16 num_fdsb_msix; /* num queue vectors for sideband Fdir */
u16 num_iwarp_msix; /* num of iwarp vectors for this PF */
int iwarp_base_vector;
int queues_left; /* queues left unclaimed */
@@ -320,8 +320,6 @@ struct i40e_pf {
#define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(1)
#define I40E_FLAG_MSI_ENABLED BIT_ULL(2)
#define I40E_FLAG_MSIX_ENABLED BIT_ULL(3)
-#define I40E_FLAG_RX_1BUF_ENABLED BIT_ULL(4)
-#define I40E_FLAG_RX_PS_ENABLED BIT_ULL(5)
#define I40E_FLAG_RSS_ENABLED BIT_ULL(6)
#define I40E_FLAG_VMDQ_ENABLED BIT_ULL(7)
#define I40E_FLAG_FDIR_REQUIRES_REINIT BIT_ULL(8)
@@ -330,7 +328,6 @@ struct i40e_pf {
#ifdef I40E_FCOE
#define I40E_FLAG_FCOE_ENABLED BIT_ULL(11)
#endif /* I40E_FCOE */
-#define I40E_FLAG_16BYTE_RX_DESC_ENABLED BIT_ULL(13)
#define I40E_FLAG_CLEAN_ADMINQ BIT_ULL(14)
#define I40E_FLAG_FILTER_SYNC BIT_ULL(15)
#define I40E_FLAG_SERVICE_CLIENT_REQUESTED BIT_ULL(16)
@@ -363,6 +360,7 @@ struct i40e_pf {
#define I40E_FLAG_STOP_FW_LLDP BIT_ULL(47)
#define I40E_FLAG_HAVE_10GBASET_PHY BIT_ULL(48)
#define I40E_FLAG_PF_MAC BIT_ULL(50)
+#define I40E_FLAG_TRUE_PROMISC_SUPPORT BIT_ULL(51)
/* tracks features that get auto disabled by errors */
u64 auto_disable_flags;
@@ -450,6 +448,14 @@ struct i40e_pf {
u16 phy_led_val;
};
+enum i40e_filter_state {
+ I40E_FILTER_INVALID = 0, /* Invalid state */
+ I40E_FILTER_NEW, /* New, not sent to FW yet */
+ I40E_FILTER_ACTIVE, /* Added to switch by FW */
+ I40E_FILTER_FAILED, /* Rejected by FW */
+ I40E_FILTER_REMOVE, /* To be removed */
+/* There is no 'removed' state; the filter struct is freed */
+};
struct i40e_mac_filter {
struct list_head list;
u8 macaddr[ETH_ALEN];
@@ -458,8 +464,7 @@ struct i40e_mac_filter {
u8 counter; /* number of instances of this filter */
bool is_vf; /* filter belongs to a VF */
bool is_netdev; /* filter belongs to a netdev */
- bool changed; /* filter needs to be sync'd to the HW */
- bool is_laa; /* filter is a Locally Administered Address */
+ enum i40e_filter_state state;
};
struct i40e_veb {
@@ -525,6 +530,9 @@ struct i40e_vsi {
struct i40e_ring **rx_rings;
struct i40e_ring **tx_rings;
+ u32 active_filters;
+ u32 promisc_threshold;
+
u16 work_limit;
u16 int_rate_limit; /* value in usecs */
@@ -534,9 +542,7 @@ struct i40e_vsi {
u8 *rss_lut_user; /* User configured lookup table entries */
u16 max_frame;
- u16 rx_hdr_len;
u16 rx_buf_len;
- u8 dtype;
/* List of q_vectors allocated to this VSI */
struct i40e_q_vector **q_vectors;
@@ -554,7 +560,7 @@ struct i40e_vsi {
u16 num_queue_pairs; /* Used tx and rx pairs */
u16 num_desc;
enum i40e_vsi_type type; /* VSI type, e.g., LAN, FCoE, etc */
- u16 vf_id; /* Virtual function ID for SRIOV VSIs */
+ s16 vf_id; /* Virtual function ID for SRIOV VSIs */
struct i40e_tc_configuration tc_config;
struct i40e_aqc_vsi_properties_data info;
@@ -811,6 +817,7 @@ int i40e_vlan_rx_kill_vid(struct net_device *netdev,
__always_unused __be16 proto, u16 vid);
#endif
int i40e_open(struct net_device *netdev);
+int i40e_close(struct net_device *netdev);
int i40e_vsi_open(struct i40e_vsi *vsi);
void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
@@ -823,7 +830,6 @@ bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
bool is_vf, bool is_netdev);
#ifdef I40E_FCOE
-int i40e_close(struct net_device *netdev);
int __i40e_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
struct tc_to_netdev *tc);
void i40e_netpoll(struct net_device *netdev);
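
The enum i40e_filter_state added above replaces the old changed/is_laa booleans with an explicit lifecycle: NEW filters are pushed to firmware and become ACTIVE or FAILED; filters marked REMOVE are deleted and freed, so there is no REMOVED state. A toy model of those transitions; the helper name and fw_add_ok flag are invented for illustration, this is not the driver's sync code:

/* Toy model of the filter lifecycle introduced by
 * enum i40e_filter_state. fw_add_ok stands in for the admin-queue
 * result of the add-macvlan command.
 */
#include <stdbool.h>
#include <stdio.h>

enum filter_state { F_INVALID, F_NEW, F_ACTIVE, F_FAILED, F_REMOVE };

static enum filter_state sync_one(enum filter_state s, bool fw_add_ok)
{
	switch (s) {
	case F_NEW:
		return fw_add_ok ? F_ACTIVE : F_FAILED;
	case F_REMOVE:
		return F_INVALID;	/* struct would be freed here */
	default:
		return s;		/* ACTIVE/FAILED stay as they are */
	}
}

int main(void)
{
	printf("new+ok -> %d, remove -> %d\n",
	       sync_one(F_NEW, true), sync_one(F_REMOVE, true));
	return 0;
}
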
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index df8e2fd6a649..738b42a44f20 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -33,16 +33,6 @@
static void i40e_resume_aq(struct i40e_hw *hw);
/**
- * i40e_is_nvm_update_op - return true if this is an NVM update operation
- * @desc: API request descriptor
- **/
-static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
-{
- return (desc->opcode == cpu_to_le16(i40e_aqc_opc_nvm_erase)) ||
- (desc->opcode == cpu_to_le16(i40e_aqc_opc_nvm_update));
-}
-
-/**
* i40e_adminq_init_regs - Initialize AdminQ registers
* @hw: pointer to the hardware structure
*
@@ -624,13 +614,9 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
/* pre-emptive resource lock release */
i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
- hw->aq.nvm_release_on_done = false;
+ hw->nvm_release_on_done = false;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
- ret_code = i40e_aq_set_hmc_resource_profile(hw,
- I40E_HMC_PROFILE_DEFAULT,
- 0,
- NULL);
ret_code = 0;
/* success! */
@@ -1023,26 +1009,7 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
hw->aq.arq.next_to_clean = ntc;
hw->aq.arq.next_to_use = ntu;
- if (i40e_is_nvm_update_op(&e->desc)) {
- if (hw->aq.nvm_release_on_done) {
- i40e_release_nvm(hw);
- hw->aq.nvm_release_on_done = false;
- }
-
- switch (hw->nvmupd_state) {
- case I40E_NVMUPD_STATE_INIT_WAIT:
- hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
- break;
-
- case I40E_NVMUPD_STATE_WRITE_WAIT:
- hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
- break;
-
- default:
- break;
- }
- }
-
+ i40e_nvmupd_check_wait_event(hw, le16_to_cpu(e->desc.opcode));
clean_arq_element_out:
/* Set pending if needed, unlock and return */
if (pending)
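
The open-coded NVM-update handling deleted from i40e_clean_arq_element() moves into i40e_nvmupd_check_wait_event(). Reconstructed from the removed lines, its core is plausibly the transition below; this is a standalone sketch under that assumption, not the verbatim i40e_nvm.c body, and the opcode values are illustrative:

/* Sketch, reconstructed from the removed hunk: on an NVM erase or
 * update completion, release the NVM resource if one was held, then
 * advance the update state machine out of its wait state.
 */
#include <stdbool.h>
#include <stdio.h>

enum nvmupd_state { ST_INIT, ST_INIT_WAIT, ST_WRITING, ST_WRITE_WAIT };

struct hw_model {
	bool nvm_release_on_done;
	enum nvmupd_state nvmupd_state;
};

#define OPC_NVM_ERASE	0x0702	/* illustrative opcode values */
#define OPC_NVM_UPDATE	0x0703

static void check_wait_event(struct hw_model *hw, unsigned short opcode)
{
	if (opcode != OPC_NVM_ERASE && opcode != OPC_NVM_UPDATE)
		return;

	if (hw->nvm_release_on_done) {
		/* i40e_release_nvm(hw) would be called here */
		hw->nvm_release_on_done = false;
	}

	switch (hw->nvmupd_state) {
	case ST_INIT_WAIT:
		hw->nvmupd_state = ST_INIT;
		break;
	case ST_WRITE_WAIT:
		hw->nvmupd_state = ST_WRITING;
		break;
	default:
		break;
	}
}

int main(void)
{
	struct hw_model hw = { true, ST_WRITE_WAIT };

	check_wait_event(&hw, OPC_NVM_UPDATE);
	printf("state=%d released=%d\n", hw.nvmupd_state,
	       !hw.nvm_release_on_done);
	return 0;
}
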
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index 12fbbddea299..d92aad38afdc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -97,7 +97,6 @@ struct i40e_adminq_info {
u32 fw_build; /* firmware build number */
u16 api_maj_ver; /* api major version */
u16 api_min_ver; /* api minor version */
- bool nvm_release_on_done;
struct mutex asq_mutex; /* Send queue lock */
struct mutex arq_mutex; /* Receive queue lock */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 8d5c65ab6267..11cf1a5ebccf 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -78,17 +78,17 @@ struct i40e_aq_desc {
#define I40E_AQ_FLAG_EI_SHIFT 14
#define I40E_AQ_FLAG_FE_SHIFT 15
-#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
-#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
-#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
-#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
-#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
-#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
-#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
-#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
-#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
-#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
-#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
+#define I40E_AQ_FLAG_DD BIT(I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
+#define I40E_AQ_FLAG_CMP BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
+#define I40E_AQ_FLAG_ERR BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
+#define I40E_AQ_FLAG_VFE BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
+#define I40E_AQ_FLAG_LB BIT(I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
+#define I40E_AQ_FLAG_RD BIT(I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
+#define I40E_AQ_FLAG_VFC BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
+#define I40E_AQ_FLAG_BUF BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define I40E_AQ_FLAG_SI BIT(I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
+#define I40E_AQ_FLAG_EI BIT(I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
+#define I40E_AQ_FLAG_FE BIT(I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
/* error codes */
enum i40e_admin_queue_err {
@@ -205,10 +205,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_resume_port_tx = 0x041C,
i40e_aqc_opc_configure_partition_bw = 0x041D,
- /* hmc */
- i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
- i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
-
/* phy commands*/
i40e_aqc_opc_get_phy_abilities = 0x0600,
i40e_aqc_opc_set_phy_config = 0x0601,
@@ -429,6 +425,7 @@ struct i40e_aqc_list_capabilities_element_resp {
#define I40E_AQ_CAP_ID_SDP 0x0062
#define I40E_AQ_CAP_ID_MDIO 0x0063
#define I40E_AQ_CAP_ID_WSR_PROT 0x0064
+#define I40E_AQ_CAP_ID_NVM_MGMT 0x0080
#define I40E_AQ_CAP_ID_FLEX10 0x00F1
#define I40E_AQ_CAP_ID_CEM 0x00F2
@@ -1585,27 +1582,6 @@ struct i40e_aqc_configure_partition_bw_data {
I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
-/* Get and set the active HMC resource profile and status.
- * (direct 0x0500) and (direct 0x0501)
- */
-struct i40e_aq_get_set_hmc_resource_profile {
- u8 pm_profile;
- u8 pe_vf_enabled;
- u8 reserved[14];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
-
-enum i40e_aq_hmc_profile {
- /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
- I40E_HMC_PROFILE_DEFAULT = 1,
- I40E_HMC_PROFILE_FAVOR_VF = 2,
- I40E_HMC_PROFILE_EQUAL = 3,
-};
-
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F
-
/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
/* set in param0 for get phy abilities to report qualified modules */
@@ -1652,11 +1628,11 @@ enum i40e_aq_phy_type {
enum i40e_aq_link_speed {
I40E_LINK_SPEED_UNKNOWN = 0,
- I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT),
- I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT),
- I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT),
- I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT),
- I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT)
+ I40E_LINK_SPEED_100MB = BIT(I40E_LINK_SPEED_100MB_SHIFT),
+ I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT),
+ I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT),
+ I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT),
+ I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT)
};
struct i40e_aqc_module_desc {
@@ -1857,7 +1833,10 @@ struct i40e_aqc_set_phy_debug {
#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00
#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01
#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02
+/* Disable link manageability on a single port */
#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10
+/* Disable link manageability on all ports */
+#define I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW 0x20
u8 reserved[15];
};
@@ -1927,9 +1906,9 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
/* Used for 0x0704 as well as for 0x0705 commands */
#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1
#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \
- (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
+ BIT(I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
#define I40E_AQ_ANVM_FEATURE 0
-#define I40E_AQ_ANVM_IMMEDIATE_FIELD (1 << FEATURE_OR_IMMEDIATE_SHIFT)
+#define I40E_AQ_ANVM_IMMEDIATE_FIELD BIT(FEATURE_OR_IMMEDIATE_SHIFT)
struct i40e_aqc_nvm_config_data_feature {
__le16 feature_id;
#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01
@@ -2226,13 +2205,11 @@ I40E_CHECK_STRUCT_LEN(0x20, i40e_aqc_get_cee_dcb_cfg_resp);
*/
struct i40e_aqc_lldp_set_local_mib {
#define SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT 0
-#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
-#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK (1 << \
- SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_DCBX_MASK BIT(SET_LOCAL_MIB_AC_TYPE_DCBX_SHIFT)
#define SET_LOCAL_MIB_AC_TYPE_LOCAL_MIB 0x0
#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT (1)
-#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK (1 << \
- SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT)
+#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_MASK \
+ BIT(SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS_SHIFT)
#define SET_LOCAL_MIB_AC_TYPE_NON_WILLING_APPS 0x1
u8 type;
u8 reserved0;
@@ -2250,7 +2227,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_set_local_mib);
struct i40e_aqc_lldp_stop_start_specific_agent {
#define I40E_AQC_START_SPECIFIC_AGENT_SHIFT 0
#define I40E_AQC_START_SPECIFIC_AGENT_MASK \
- (1 << I40E_AQC_START_SPECIFIC_AGENT_SHIFT)
+ BIT(I40E_AQC_START_SPECIFIC_AGENT_SHIFT)
u8 command;
u8 reserved[15];
};
@@ -2303,7 +2280,7 @@ struct i40e_aqc_del_udp_tunnel_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
struct i40e_aqc_get_set_rss_key {
-#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_KEY_VSI_VALID BIT(15)
#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
@@ -2323,14 +2300,13 @@ struct i40e_aqc_get_set_rss_key_data {
I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
struct i40e_aqc_get_set_rss_lut {
-#define I40E_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID BIT(15)
#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
__le16 vsi_id;
#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \
- I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
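
The shift/mask macros converted above pack a 10-bit VSI number and a valid flag into the 16-bit vsi_id field of the get/set RSS descriptors. A standalone sketch of that encode/decode; the macro values are copied from the hunk, the harness is illustrative:

/* Sketch of packing and unpacking the vsi_id field of
 * i40e_aqc_get_set_rss_lut using the shift/mask/valid-bit macros.
 */
#include <stdint.h>
#include <stdio.h>

#define BIT(nr) (1UL << (nr))
#define SET_RSS_LUT_VSI_VALID		BIT(15)
#define SET_RSS_LUT_VSI_ID_SHIFT	0
#define SET_RSS_LUT_VSI_ID_MASK		(0x3FF << SET_RSS_LUT_VSI_ID_SHIFT)

int main(void)
{
	uint16_t vsi = 0x155;	/* any 10-bit VSI number */
	uint16_t field;

	/* encode: place the VSI number and mark the field valid */
	field = ((vsi << SET_RSS_LUT_VSI_ID_SHIFT) &
		 SET_RSS_LUT_VSI_ID_MASK) | SET_RSS_LUT_VSI_VALID;

	/* decode: mask the VSI number back out, test the valid bit */
	printf("vsi=%#x valid=%d\n",
	       (field & SET_RSS_LUT_VSI_ID_MASK) >>
			SET_RSS_LUT_VSI_ID_SHIFT,
	       !!(field & SET_RSS_LUT_VSI_VALID));
	return 0;
}
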
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index 0e6ac841321c..618f18436618 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -199,6 +199,7 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi)
void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi)
{
struct i40e_client_instance *cdev;
+ int ret = 0;
if (!vsi)
return;
@@ -211,7 +212,14 @@ void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi)
"Cannot locate client instance open routine\n");
continue;
}
- cdev->client->ops->open(&cdev->lan_info, cdev->client);
+ if (!(test_bit(__I40E_CLIENT_INSTANCE_OPENED,
+ &cdev->state))) {
+ ret = cdev->client->ops->open(&cdev->lan_info,
+ cdev->client);
+ if (!ret)
+ set_bit(__I40E_CLIENT_INSTANCE_OPENED,
+ &cdev->state);
+ }
}
}
mutex_unlock(&i40e_client_instance_mutex);
@@ -407,12 +415,14 @@ struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf,
* i40e_client_add_instance - add a client instance struct to the instance list
* @pf: pointer to the board struct
* @client: pointer to a client struct in the client list.
+ * @existing: if there was already an existing instance
*
- * Returns cdev ptr on success, NULL on failure
+ * Returns cdev ptr on success or if already exists, NULL on failure
**/
static
struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf,
- struct i40e_client *client)
+ struct i40e_client *client,
+ bool *existing)
{
struct i40e_client_instance *cdev;
struct netdev_hw_addr *mac = NULL;
@@ -421,7 +431,7 @@ struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf,
mutex_lock(&i40e_client_instance_mutex);
list_for_each_entry(cdev, &i40e_client_instances, list) {
if ((cdev->lan_info.pf == pf) && (cdev->client == client)) {
- cdev = NULL;
+ *existing = true;
goto out;
}
}
@@ -505,6 +515,7 @@ void i40e_client_subtask(struct i40e_pf *pf)
{
struct i40e_client_instance *cdev;
struct i40e_client *client;
+ bool existing = false;
int ret = 0;
if (!(pf->flags & I40E_FLAG_SERVICE_CLIENT_REQUESTED))
@@ -528,18 +539,25 @@ void i40e_client_subtask(struct i40e_pf *pf)
/* check if L2 VSI is up, if not we are not ready */
if (test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
continue;
+ } else {
+ dev_warn(&pf->pdev->dev, "This client %s is being instantiated at probe\n",
+ client->name);
}
/* Add the client instance to the instance list */
- cdev = i40e_client_add_instance(pf, client);
+ cdev = i40e_client_add_instance(pf, client, &existing);
if (!cdev)
continue;
- /* Also up the ref_cnt of no. of instances of this client */
- atomic_inc(&client->ref_cnt);
- dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x func=0x%02x\n",
- client->name, pf->hw.pf_id,
- pf->hw.bus.device, pf->hw.bus.func);
+ if (!existing) {
+ /* Also up the ref_cnt for no. of instances of this
+ * client.
+ */
+ atomic_inc(&client->ref_cnt);
+ dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x func=0x%02x\n",
+ client->name, pf->hw.pf_id,
+ pf->hw.bus.device, pf->hw.bus.func);
+ }
/* Send an Open request to the client */
atomic_inc(&cdev->ref_cnt);
@@ -588,7 +606,8 @@ int i40e_lan_add_device(struct i40e_pf *pf)
pf->hw.pf_id, pf->hw.bus.device, pf->hw.bus.func);
/* Since in some cases register may have happened before a device gets
- * added, we can schedule a subtask to go initiate the clients.
+ * added, we can schedule a subtask to go initiate the clients if
+ * they can be launched at probe time.
*/
pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
i40e_service_event_schedule(pf);
@@ -980,13 +999,13 @@ int i40e_unregister_client(struct i40e_client *client)
* a close for each of the client instances that were opened.
* client_release function is called to handle this.
*/
+ mutex_lock(&i40e_client_mutex);
if (!client || i40e_client_release(client)) {
ret = -EIO;
goto out;
}
/* TODO: check if device is in reset, or if that matters? */
- mutex_lock(&i40e_client_mutex);
if (!i40e_client_is_registered(client)) {
pr_info("i40e: Client %s has not been registered\n",
client->name);
@@ -1005,8 +1024,8 @@ int i40e_unregister_client(struct i40e_client *client)
client->name);
}
- mutex_unlock(&i40e_client_mutex);
out:
+ mutex_unlock(&i40e_client_mutex);
return ret;
}
EXPORT_SYMBOL(i40e_unregister_client);
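
The i40e_notify_client_of_netdev_open() change above guards the client's ->open() call with the __I40E_CLIENT_INSTANCE_OPENED state bit, so repeated notifications do not re-open an instance. A simplified user-space model of that open-once pattern; the struct, function names, and plain bit arithmetic (in place of the kernel's atomic test_bit()/set_bit()) are invented for illustration:

/* Sketch of the open-once guard: only call ->open() when the
 * OPENED bit is clear, and set it on success.
 */
#include <stdio.h>

#define INSTANCE_OPENED 0

struct client_instance {
	unsigned long state;
	int (*open)(struct client_instance *ci);
};

static void notify_open(struct client_instance *ci)
{
	if (!(ci->state & (1UL << INSTANCE_OPENED))) {
		if (!ci->open(ci))
			ci->state |= 1UL << INSTANCE_OPENED;
	}
}

static int fake_open(struct client_instance *ci)
{
	printf("open called\n");
	return 0;
}

int main(void)
{
	struct client_instance ci = { .state = 0, .open = fake_open };

	notify_open(&ci);
	notify_open(&ci);	/* second notification is a no-op */
	return 0;
}
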
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.h b/drivers/net/ethernet/intel/i40e/i40e_client.h
index bf6b453d93a1..a4601d97fb24 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.h
@@ -217,7 +217,7 @@ struct i40e_client {
#define I40E_CLIENT_FLAGS_LAUNCH_ON_PROBE BIT(0)
#define I40E_TX_FLAGS_NOTIFY_OTHER_EVENTS BIT(2)
enum i40e_client_type type;
- struct i40e_client_ops *ops; /* client ops provided by the client */
+ const struct i40e_client_ops *ops; /* client ops provided by the client */
};
static inline bool i40e_client_is_registered(struct i40e_client *client)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 4596294c2ab1..2154a34c1dd8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -60,6 +60,7 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_SFP_X722:
case I40E_DEV_ID_1G_BASE_T_X722:
case I40E_DEV_ID_10G_BASE_T_X722:
+ case I40E_DEV_ID_SFP_I_X722:
hw->mac.type = I40E_MAC_X722;
break;
default:
@@ -295,13 +296,15 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
void *buffer, u16 buf_len)
{
struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
- u16 len = le16_to_cpu(aq_desc->datalen);
+ u16 len;
u8 *buf = (u8 *)buffer;
u16 i = 0;
if ((!(mask & hw->debug_mask)) || (desc == NULL))
return;
+ len = le16_to_cpu(aq_desc->datalen);
+
i40e_debug(hw, mask,
"AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
le16_to_cpu(aq_desc->opcode),
@@ -694,7 +697,7 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
/* Non Tunneled IPv6 */
I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
+ I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(91),
I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
@@ -1901,13 +1904,13 @@ i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
*
* Reset the external PHY.
**/
-enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
- struct i40e_asq_cmd_details *cmd_details)
+i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_phy_debug *cmd =
(struct i40e_aqc_set_phy_debug *)&desc.params.raw;
- enum i40e_status_code status;
+ i40e_status status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_phy_debug);
@@ -1965,15 +1968,73 @@ aq_add_vsi_exit:
}
/**
+ * i40e_aq_set_default_vsi
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)
+ &desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ cmd->promiscuous_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
+ cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
+ cmd->seid = cpu_to_le16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_clear_default_vsi
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)
+ &desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ cmd->promiscuous_flags = cpu_to_le16(0);
+ cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_DEFAULT);
+ cmd->seid = cpu_to_le16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_aq_set_vsi_unicast_promiscuous
* @hw: pointer to the hw struct
* @seid: vsi number
* @set: set unicast promiscuous enable/disable
* @cmd_details: pointer to command details structure or NULL
+ * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc
**/
i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
u16 seid, bool set,
- struct i40e_asq_cmd_details *cmd_details)
+ struct i40e_asq_cmd_details *cmd_details,
+ bool rx_only_promisc)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
@@ -1986,8 +2047,9 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
if (set) {
flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
- if (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
- (hw->aq.api_maj_ver > 1))
+ if (rx_only_promisc &&
+ (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
+ (hw->aq.api_maj_ver > 1)))
flags |= I40E_AQC_SET_VSI_PROMISC_TX;
}
@@ -2037,6 +2099,76 @@ i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
}
/**
+ * i40e_aq_set_vsi_mc_promisc_on_vlan
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set MAC L2 layer multicast promiscuous enable/disable for a given VLAN
+ * @vid: The VLAN tag filter - capture any multicast packet with this VLAN tag
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (enable)
+ flags |= I40E_AQC_SET_VSI_PROMISC_MULTICAST;
+
+ cmd->promiscuous_flags = cpu_to_le16(flags);
+ cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_MULTICAST);
+ cmd->seid = cpu_to_le16(seid);
+ cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_vsi_uc_promisc_on_vlan
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @enable: set MAC L2 layer unicast promiscuous enable/disable for a given VLAN
+ * @vid: The VLAN tag filter - capture any unicast packet with this VLAN tag
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ u16 vid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 flags = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ if (enable)
+ flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+
+ cmd->promiscuous_flags = cpu_to_le16(flags);
+ cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+ cmd->seid = cpu_to_le16(seid);
+ cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_aq_set_vsi_broadcast
* @hw: pointer to the hw struct
* @seid: vsi number
@@ -2157,6 +2289,9 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
struct i40e_aq_desc desc;
struct i40e_aqc_add_get_update_vsi *cmd =
(struct i40e_aqc_add_get_update_vsi *)&desc.params.raw;
+ struct i40e_aqc_add_get_update_vsi_completion *resp =
+ (struct i40e_aqc_add_get_update_vsi_completion *)
+ &desc.params.raw;
i40e_status status;
i40e_fill_default_direct_cmd_desc(&desc,
@@ -2168,6 +2303,9 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
sizeof(vsi_ctx->info), cmd_details);
+ vsi_ctx->vsis_allocated = le16_to_cpu(resp->vsi_used);
+ vsi_ctx->vsis_unallocated = le16_to_cpu(resp->vsi_free);
+
return status;
}
@@ -2205,6 +2343,35 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
}
/**
+ * i40e_aq_set_switch_config
+ * @hw: pointer to the hardware structure
+ * @flags: bit flag values to set
+ * @valid_flags: which bit flags to set
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set switch configuration bits
+ **/
+enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
+ u16 flags,
+ u16 valid_flags,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_switch_config *scfg =
+ (struct i40e_aqc_set_switch_config *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_switch_config);
+ scfg->flags = cpu_to_le16(flags);
+ scfg->valid_flags = cpu_to_le16(valid_flags);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_aq_get_firmware_version
* @hw: pointer to the hw struct
* @fw_major_version: firmware major version
@@ -2660,10 +2827,7 @@ i40e_status i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
u16 *rules_used, u16 *rules_free)
{
/* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */
- if (rule_type != I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
- if (!rule_id)
- return I40E_ERR_PARAM;
- } else {
+ if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
/* count and mr_list shall be valid for rule_type INGRESS VLAN
* mirroring. For other rule_type, count and rule_type should
* not matter.
@@ -2780,36 +2944,6 @@ i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
}
/**
- * i40e_aq_set_hmc_resource_profile
- * @hw: pointer to the hw struct
- * @profile: type of profile the HMC is to be set as
- * @pe_vf_enabled_count: the number of PE enabled VFs the system has
- * @cmd_details: pointer to command details structure or NULL
- *
- * set the HMC profile of the device.
- **/
-i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
- enum i40e_aq_hmc_profile profile,
- u8 pe_vf_enabled_count,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aq_get_set_hmc_resource_profile *cmd =
- (struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw;
- i40e_status status;
-
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_set_hmc_resource_profile);
-
- cmd->pm_profile = (u8)profile;
- cmd->pe_vf_enabled = pe_vf_enabled_count;
-
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- return status;
-}
-
-/**
* i40e_aq_request_resource
* @hw: pointer to the hw struct
* @resource: resource id
@@ -3073,6 +3207,9 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
break;
case I40E_AQ_CAP_ID_MSIX:
p->num_msix_vectors = number;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: MSIX vector count = %d\n",
+ p->num_msix_vectors);
break;
case I40E_AQ_CAP_ID_VF_MSIX:
p->num_msix_vectors_vf = number;
@@ -3128,6 +3265,12 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
p->wr_csr_prot = (u64)number;
p->wr_csr_prot |= (u64)logical_id << 32;
break;
+ case I40E_AQ_CAP_ID_NVM_MGMT:
+ if (number & I40E_NVM_MGMT_SEC_REV_DISABLED)
+ p->sec_rev_disabled = true;
+ if (number & I40E_NVM_MGMT_UPDATE_DISABLED)
+ p->update_disabled = true;
+ break;
default:
break;
}
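
The i40e_debug_aq() change earlier in this file fixes an ordering bug: the original read aq_desc->datalen in an initializer, before the desc == NULL early return. A minimal standalone illustration of check-before-dereference; types and names here are invented:

/* Sketch of the fix: defer the field read until after the NULL
 * check so a missing descriptor is a safe no-op.
 */
#include <stddef.h>
#include <stdio.h>

struct desc { unsigned short datalen; };

static void debug_desc(const struct desc *d)
{
	unsigned short len;

	if (!d)			/* check first ... */
		return;
	len = d->datalen;	/* ... dereference second */
	printf("datalen=%u\n", len);
}

int main(void)
{
	struct desc d = { .datalen = 16 };

	debug_desc(NULL);	/* safe no-op */
	debug_desc(&d);
	return 0;
}
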
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 0c97733d253c..05cf9a719bab 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -116,6 +116,14 @@ static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer,
return len;
}
+static char *i40e_filter_state_string[] = {
+ "INVALID",
+ "NEW",
+ "ACTIVE",
+ "FAILED",
+ "REMOVE",
+};
+
/**
* i40e_dbg_dump_vsi_seid - handles dump vsi seid write into command datum
* @pf: the i40e_pf created in command write
@@ -147,9 +155,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
dev_info(&pf->pdev->dev, " vlan_features = 0x%08lx\n",
(unsigned long int)nd->vlan_features);
}
- if (vsi->active_vlans)
- dev_info(&pf->pdev->dev,
- " vlgrp: & = %p\n", vsi->active_vlans);
+ dev_info(&pf->pdev->dev,
+ " vlgrp: & = %p\n", vsi->active_vlans);
dev_info(&pf->pdev->dev,
" state = %li flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n",
vsi->state, vsi->flags,
@@ -161,10 +168,14 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
pf->hw.mac.port_addr);
list_for_each_entry(f, &vsi->mac_filter_list, list) {
dev_info(&pf->pdev->dev,
- " mac_filter_list: %pM vid=%d, is_netdev=%d is_vf=%d counter=%d\n",
+ " mac_filter_list: %pM vid=%d, is_netdev=%d is_vf=%d counter=%d, state %s\n",
f->macaddr, f->vlan, f->is_netdev, f->is_vf,
- f->counter);
+ f->counter, i40e_filter_state_string[f->state]);
}
+ dev_info(&pf->pdev->dev, " active_filters %d, promisc_threshold %d, overflow promisc %s\n",
+ vsi->active_filters, vsi->promisc_threshold,
+ (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state) ?
+ "ON" : "OFF"));
nstat = i40e_get_vsi_stats_struct(vsi);
dev_info(&pf->pdev->dev,
" net_stats: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
@@ -269,13 +280,11 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
rx_ring->queue_index,
rx_ring->reg_idx);
dev_info(&pf->pdev->dev,
- " rx_rings[%i]: rx_hdr_len = %d, rx_buf_len = %d, dtype = %d\n",
- i, rx_ring->rx_hdr_len,
- rx_ring->rx_buf_len,
- rx_ring->dtype);
+ " rx_rings[%i]: rx_buf_len = %d\n",
+ i, rx_ring->rx_buf_len);
dev_info(&pf->pdev->dev,
- " rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
- i, ring_is_ps_enabled(rx_ring),
+ " rx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+ i,
rx_ring->next_to_use,
rx_ring->next_to_clean,
rx_ring->ring_active);
@@ -327,9 +336,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
tx_ring->queue_index,
tx_ring->reg_idx);
dev_info(&pf->pdev->dev,
- " tx_rings[%i]: dtype = %d\n",
- i, tx_ring->dtype);
- dev_info(&pf->pdev->dev,
" tx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
i,
tx_ring->next_to_use,
@@ -366,8 +372,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
" work_limit = %d\n",
vsi->work_limit);
dev_info(&pf->pdev->dev,
- " max_frame = %d, rx_hdr_len = %d, rx_buf_len = %d dtype = %d\n",
- vsi->max_frame, vsi->rx_hdr_len, vsi->rx_buf_len, vsi->dtype);
+ " max_frame = %d, rx_buf_len = %d dtype = %d\n",
+ vsi->max_frame, vsi->rx_buf_len, 0);
dev_info(&pf->pdev->dev,
" num_q_vectors = %i, base_vector = %i\n",
vsi->num_q_vectors, vsi->base_vector);
@@ -592,13 +598,6 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
" d[%03x] = 0x%016llx 0x%016llx\n",
i, txd->buffer_addr,
txd->cmd_type_offset_bsz);
- } else if (sizeof(union i40e_rx_desc) ==
- sizeof(union i40e_16byte_rx_desc)) {
- rxd = I40E_RX_DESC(ring, i);
- dev_info(&pf->pdev->dev,
- " d[%03x] = 0x%016llx 0x%016llx\n",
- i, rxd->read.pkt_addr,
- rxd->read.hdr_addr);
} else {
rxd = I40E_RX_DESC(ring, i);
dev_info(&pf->pdev->dev,
@@ -620,13 +619,6 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
"vsi = %02i tx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
vsi_seid, ring_id, desc_n,
txd->buffer_addr, txd->cmd_type_offset_bsz);
- } else if (sizeof(union i40e_rx_desc) ==
- sizeof(union i40e_16byte_rx_desc)) {
- rxd = I40E_RX_DESC(ring, desc_n);
- dev_info(&pf->pdev->dev,
- "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
- vsi_seid, ring_id, desc_n,
- rxd->read.pkt_addr, rxd->read.hdr_addr);
} else {
rxd = I40E_RX_DESC(ring, desc_n);
dev_info(&pf->pdev->dev,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h
index 99257fcd1ef4..dd4457d29e98 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h
@@ -44,6 +44,7 @@
#define I40E_DEV_ID_SFP_X722 0x37D0
#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
+#define I40E_DEV_ID_SFP_I_X722 0x37D3
#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
(d) == I40E_DEV_ID_QSFP_B || \
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 784b1659457a..c912e041d102 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -230,12 +230,22 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
#define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)
+static const char i40e_priv_flags_strings_gl[][ETH_GSTRING_LEN] = {
+ "MFP",
+ "LinkPolling",
+ "flow-director-atr",
+ "veb-stats",
+ "hw-atr-eviction",
+ "vf-true-promisc-support",
+};
+
+#define I40E_PRIV_FLAGS_GL_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings_gl)
+
static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = {
"NPAR",
"LinkPolling",
"flow-director-atr",
"veb-stats",
- "packet-split",
"hw-atr-eviction",
};
@@ -252,6 +262,118 @@ static void i40e_partition_setting_complaint(struct i40e_pf *pf)
}
/**
+ * i40e_phy_type_to_ethtool - convert the phy_types to ethtool link modes
+ * @phy_types: PHY types to convert
+ * @supported: pointer to the ethtool supported variable to fill in
+ * @advertising: pointer to the ethtool advertising variable to fill in
+ *
+ **/
+static void i40e_phy_type_to_ethtool(struct i40e_pf *pf, u32 *supported,
+ u32 *advertising)
+{
+ enum i40e_aq_capabilities_phy_type phy_types = pf->hw.phy.phy_types;
+ struct i40e_link_status *hw_link_info = &pf->hw.phy.link_info;
+ *supported = 0x0;
+ *advertising = 0x0;
+
+ if (phy_types & I40E_CAP_PHY_TYPE_SGMII) {
+ *supported |= SUPPORTED_Autoneg |
+ SUPPORTED_1000baseT_Full;
+ *advertising |= ADVERTISED_Autoneg;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
+ *advertising |= ADVERTISED_1000baseT_Full;
+ if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) {
+ *supported |= SUPPORTED_100baseT_Full;
+ *advertising |= ADVERTISED_100baseT_Full;
+ }
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_XAUI ||
+ phy_types & I40E_CAP_PHY_TYPE_XFI ||
+ phy_types & I40E_CAP_PHY_TYPE_SFI ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_AOC)
+ *supported |= SUPPORTED_10000baseT_Full;
+ if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_T ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR ||
+ phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) {
+ *supported |= SUPPORTED_Autoneg |
+ SUPPORTED_10000baseT_Full;
+ *advertising |= ADVERTISED_Autoneg;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+ *advertising |= ADVERTISED_10000baseT_Full;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_XLAUI ||
+ phy_types & I40E_CAP_PHY_TYPE_XLPPI ||
+ phy_types & I40E_CAP_PHY_TYPE_40GBASE_AOC)
+ *supported |= SUPPORTED_40000baseCR4_Full;
+ if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU ||
+ phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) {
+ *supported |= SUPPORTED_Autoneg |
+ SUPPORTED_40000baseCR4_Full;
+ *advertising |= ADVERTISED_Autoneg;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_40GB)
+ *advertising |= ADVERTISED_40000baseCR4_Full;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) {
+ *supported |= SUPPORTED_Autoneg |
+ SUPPORTED_100baseT_Full;
+ *advertising |= ADVERTISED_Autoneg;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
+ *advertising |= ADVERTISED_100baseT_Full;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T ||
+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX ||
+ phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) {
+ *supported |= SUPPORTED_Autoneg |
+ SUPPORTED_1000baseT_Full;
+ *advertising |= ADVERTISED_Autoneg;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
+ *advertising |= ADVERTISED_1000baseT_Full;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4)
+ *supported |= SUPPORTED_40000baseSR4_Full;
+ if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4)
+ *supported |= SUPPORTED_40000baseLR4_Full;
+ if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4) {
+ *supported |= SUPPORTED_40000baseKR4_Full |
+ SUPPORTED_Autoneg;
+ *advertising |= ADVERTISED_40000baseKR4_Full |
+ ADVERTISED_Autoneg;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) {
+ *supported |= SUPPORTED_20000baseKR2_Full |
+ SUPPORTED_Autoneg;
+ *advertising |= ADVERTISED_Autoneg;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_20GB)
+ *advertising |= ADVERTISED_20000baseKR2_Full;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR) {
+ *supported |= SUPPORTED_10000baseKR_Full |
+ SUPPORTED_Autoneg;
+ *advertising |= ADVERTISED_Autoneg;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+ *advertising |= ADVERTISED_10000baseKR_Full;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) {
+ *supported |= SUPPORTED_10000baseKX4_Full |
+ SUPPORTED_Autoneg;
+ *advertising |= ADVERTISED_Autoneg;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
+ *advertising |= ADVERTISED_10000baseKX4_Full;
+ }
+ if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX) {
+ *supported |= SUPPORTED_1000baseKX_Full |
+ SUPPORTED_Autoneg;
+ *advertising |= ADVERTISED_Autoneg;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
+ *advertising |= ADVERTISED_1000baseKX_Full;
+ }
+}
+
+/**
* i40e_get_settings_link_up - Get the Link settings for when link is up
* @hw: hw structure
* @ecmd: ethtool command to fill in
@@ -265,6 +387,8 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
{
struct i40e_link_status *hw_link_info = &hw->phy.link_info;
u32 link_speed = hw_link_info->link_speed;
+ u32 e_advertising = 0x0;
+ u32 e_supported = 0x0;
/* Initialize supported and advertised settings based on phy settings */
switch (hw_link_info->phy_type) {
@@ -305,14 +429,18 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
break;
case I40E_PHY_TYPE_10GBASE_T:
case I40E_PHY_TYPE_1000BASE_T:
+ case I40E_PHY_TYPE_100BASE_TX:
ecmd->supported = SUPPORTED_Autoneg |
SUPPORTED_10000baseT_Full |
- SUPPORTED_1000baseT_Full;
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_100baseT_Full;
ecmd->advertising = ADVERTISED_Autoneg;
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB)
ecmd->advertising |= ADVERTISED_10000baseT_Full;
if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB)
ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
+ ecmd->advertising |= ADVERTISED_100baseT_Full;
break;
case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
ecmd->supported = SUPPORTED_Autoneg |
@@ -320,12 +448,6 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
ecmd->advertising = ADVERTISED_Autoneg |
ADVERTISED_1000baseT_Full;
break;
- case I40E_PHY_TYPE_100BASE_TX:
- ecmd->supported = SUPPORTED_Autoneg |
- SUPPORTED_100baseT_Full;
- if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB)
- ecmd->advertising |= ADVERTISED_100baseT_Full;
- break;
case I40E_PHY_TYPE_10GBASE_CR1_CU:
case I40E_PHY_TYPE_10GBASE_CR1:
ecmd->supported = SUPPORTED_Autoneg |
@@ -339,6 +461,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
case I40E_PHY_TYPE_10GBASE_SFPP_CU:
case I40E_PHY_TYPE_10GBASE_AOC:
ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->advertising = ADVERTISED_10000baseT_Full;
break;
case I40E_PHY_TYPE_SGMII:
ecmd->supported = SUPPORTED_Autoneg |
@@ -352,14 +475,23 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
ecmd->advertising |= ADVERTISED_100baseT_Full;
}
break;
- /* Backplane is set based on supported phy types in get_settings
- * so don't set anything here but don't warn either
- */
case I40E_PHY_TYPE_40GBASE_KR4:
case I40E_PHY_TYPE_20GBASE_KR2:
case I40E_PHY_TYPE_10GBASE_KR:
case I40E_PHY_TYPE_10GBASE_KX4:
case I40E_PHY_TYPE_1000BASE_KX:
+ ecmd->supported |= SUPPORTED_40000baseKR4_Full |
+ SUPPORTED_20000baseKR2_Full |
+ SUPPORTED_10000baseKR_Full |
+ SUPPORTED_10000baseKX4_Full |
+ SUPPORTED_1000baseKX_Full |
+ SUPPORTED_Autoneg;
+ ecmd->advertising |= ADVERTISED_40000baseKR4_Full |
+ ADVERTISED_20000baseKR2_Full |
+ ADVERTISED_10000baseKR_Full |
+ ADVERTISED_10000baseKX4_Full |
+ ADVERTISED_1000baseKX_Full |
+ ADVERTISED_Autoneg;
break;
default:
/* if we got here and link is up something bad is afoot */
@@ -367,6 +499,16 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw,
hw_link_info->phy_type);
}
+ /* Now that we've worked out everything that could be supported by the
+ * current PHY type, AND it with what is supported by the NVM to
+ * get what is truly supported
+ */
+ i40e_phy_type_to_ethtool(pf, &e_supported,
+ &e_advertising);
+
+ ecmd->supported = ecmd->supported & e_supported;
+ ecmd->advertising = ecmd->advertising & e_advertising;
+
/* Set speed and duplex */
switch (link_speed) {
case I40E_LINK_SPEED_40GB:
@@ -401,74 +543,11 @@ static void i40e_get_settings_link_down(struct i40e_hw *hw,
struct ethtool_cmd *ecmd,
struct i40e_pf *pf)
{
- enum i40e_aq_capabilities_phy_type phy_types = hw->phy.phy_types;
-
/* link is down and the driver needs to fall back on
* supported phy types to figure out what info to display
*/
- ecmd->supported = 0x0;
- ecmd->advertising = 0x0;
- if (phy_types & I40E_CAP_PHY_TYPE_SGMII) {
- ecmd->supported |= SUPPORTED_Autoneg |
- SUPPORTED_1000baseT_Full;
- ecmd->advertising |= ADVERTISED_Autoneg |
- ADVERTISED_1000baseT_Full;
- if (pf->hw.mac.type == I40E_MAC_X722) {
- ecmd->supported |= SUPPORTED_100baseT_Full;
- ecmd->advertising |= ADVERTISED_100baseT_Full;
- if (pf->flags & I40E_FLAG_100M_SGMII_CAPABLE) {
- ecmd->supported |= SUPPORTED_100baseT_Full;
- ecmd->advertising |= ADVERTISED_100baseT_Full;
- }
- }
- }
- if (phy_types & I40E_CAP_PHY_TYPE_XAUI ||
- phy_types & I40E_CAP_PHY_TYPE_XFI ||
- phy_types & I40E_CAP_PHY_TYPE_SFI ||
- phy_types & I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU ||
- phy_types & I40E_CAP_PHY_TYPE_10GBASE_AOC)
- ecmd->supported |= SUPPORTED_10000baseT_Full;
- if (phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1_CU ||
- phy_types & I40E_CAP_PHY_TYPE_10GBASE_CR1 ||
- phy_types & I40E_CAP_PHY_TYPE_10GBASE_T ||
- phy_types & I40E_CAP_PHY_TYPE_10GBASE_SR ||
- phy_types & I40E_CAP_PHY_TYPE_10GBASE_LR) {
- ecmd->supported |= SUPPORTED_Autoneg |
- SUPPORTED_10000baseT_Full;
- ecmd->advertising |= ADVERTISED_Autoneg |
- ADVERTISED_10000baseT_Full;
- }
- if (phy_types & I40E_CAP_PHY_TYPE_XLAUI ||
- phy_types & I40E_CAP_PHY_TYPE_XLPPI ||
- phy_types & I40E_CAP_PHY_TYPE_40GBASE_AOC)
- ecmd->supported |= SUPPORTED_40000baseCR4_Full;
- if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4_CU ||
- phy_types & I40E_CAP_PHY_TYPE_40GBASE_CR4) {
- ecmd->supported |= SUPPORTED_Autoneg |
- SUPPORTED_40000baseCR4_Full;
- ecmd->advertising |= ADVERTISED_Autoneg |
- ADVERTISED_40000baseCR4_Full;
- }
- if ((phy_types & I40E_CAP_PHY_TYPE_100BASE_TX) &&
- !(phy_types & I40E_CAP_PHY_TYPE_1000BASE_T)) {
- ecmd->supported |= SUPPORTED_Autoneg |
- SUPPORTED_100baseT_Full;
- ecmd->advertising |= ADVERTISED_Autoneg |
- ADVERTISED_100baseT_Full;
- }
- if (phy_types & I40E_CAP_PHY_TYPE_1000BASE_T ||
- phy_types & I40E_CAP_PHY_TYPE_1000BASE_SX ||
- phy_types & I40E_CAP_PHY_TYPE_1000BASE_LX ||
- phy_types & I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL) {
- ecmd->supported |= SUPPORTED_Autoneg |
- SUPPORTED_1000baseT_Full;
- ecmd->advertising |= ADVERTISED_Autoneg |
- ADVERTISED_1000baseT_Full;
- }
- if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_SR4)
- ecmd->supported |= SUPPORTED_40000baseSR4_Full;
- if (phy_types & I40E_CAP_PHY_TYPE_40GBASE_LR4)
- ecmd->supported |= SUPPORTED_40000baseLR4_Full;
+ i40e_phy_type_to_ethtool(pf, &ecmd->supported,
+ &ecmd->advertising);
/* With no link speed and duplex are unknown */
ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
@@ -497,38 +576,6 @@ static int i40e_get_settings(struct net_device *netdev,
i40e_get_settings_link_down(hw, ecmd, pf);
/* Now set the settings that don't rely on link being up/down */
-
- /* For backplane, supported and advertised are only reliant on the
- * phy types the NVM specifies are supported.
- */
- if (hw->device_id == I40E_DEV_ID_KX_B ||
- hw->device_id == I40E_DEV_ID_KX_C ||
- hw->device_id == I40E_DEV_ID_20G_KR2 ||
- hw->device_id == I40E_DEV_ID_20G_KR2_A) {
- ecmd->supported = SUPPORTED_Autoneg;
- ecmd->advertising = ADVERTISED_Autoneg;
- if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_40GBASE_KR4) {
- ecmd->supported |= SUPPORTED_40000baseKR4_Full;
- ecmd->advertising |= ADVERTISED_40000baseKR4_Full;
- }
- if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_20GBASE_KR2) {
- ecmd->supported |= SUPPORTED_20000baseKR2_Full;
- ecmd->advertising |= ADVERTISED_20000baseKR2_Full;
- }
- if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_10GBASE_KR) {
- ecmd->supported |= SUPPORTED_10000baseKR_Full;
- ecmd->advertising |= ADVERTISED_10000baseKR_Full;
- }
- if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_10GBASE_KX4) {
- ecmd->supported |= SUPPORTED_10000baseKX4_Full;
- ecmd->advertising |= ADVERTISED_10000baseKX4_Full;
- }
- if (hw->phy.phy_types & I40E_CAP_PHY_TYPE_1000BASE_KX) {
- ecmd->supported |= SUPPORTED_1000baseKX_Full;
- ecmd->advertising |= ADVERTISED_1000baseKX_Full;
- }
- }
-
/* Set autoneg settings */
ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
AUTONEG_ENABLE : AUTONEG_DISABLE);
@@ -625,6 +672,7 @@ static int i40e_set_settings(struct net_device *netdev,
if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET &&
hw->phy.media_type != I40E_MEDIA_TYPE_FIBER &&
hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE &&
+ hw->phy.media_type != I40E_MEDIA_TYPE_DA &&
hw->phy.link_info.link_info & I40E_AQ_LINK_UP)
return -EOPNOTSUPP;
@@ -1143,6 +1191,10 @@ static void i40e_get_drvinfo(struct net_device *netdev,
sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
sizeof(drvinfo->bus_info));
+ if (pf->hw.pf_id == 0)
+ drvinfo->n_priv_flags = I40E_PRIV_FLAGS_GL_STR_LEN;
+ else
+ drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN;
}
static void i40e_get_ringparam(struct net_device *netdev,
@@ -1259,6 +1311,13 @@ static int i40e_set_ringparam(struct net_device *netdev,
}
for (i = 0; i < vsi->num_queue_pairs; i++) {
+ /* this is to allow wr32 to have something to write to
+ * during early allocation of Rx buffers
+ */
+ u32 __iomem faketail = 0;
+ struct i40e_ring *ring;
+ u16 unused;
+
/* clone ring and setup updated count */
rx_rings[i] = *vsi->rx_rings[i];
rx_rings[i].count = new_rx_count;
@@ -1267,12 +1326,22 @@ static int i40e_set_ringparam(struct net_device *netdev,
*/
rx_rings[i].desc = NULL;
rx_rings[i].rx_bi = NULL;
+ rx_rings[i].tail = (u8 __iomem *)&faketail;
err = i40e_setup_rx_descriptors(&rx_rings[i]);
+ if (err)
+ goto rx_unwind;
+
+ /* now allocate the Rx buffers to make sure the OS
+ * has enough memory; any failure here means abort
+ */
+ ring = &rx_rings[i];
+ unused = I40E_DESC_UNUSED(ring);
+ err = i40e_alloc_rx_buffers(ring, unused);
+rx_unwind:
if (err) {
- while (i) {
- i--;
+ do {
i40e_free_rx_resources(&rx_rings[i]);
- }
+ } while (i--);
kfree(rx_rings);
rx_rings = NULL;
@@ -1298,6 +1367,17 @@ static int i40e_set_ringparam(struct net_device *netdev,
if (rx_rings) {
for (i = 0; i < vsi->num_queue_pairs; i++) {
i40e_free_rx_resources(vsi->rx_rings[i]);
+ /* get the real tail offset */
+ rx_rings[i].tail = vsi->rx_rings[i]->tail;
+ /* this is to fake out the allocation routine
+ * into thinking it has to realloc everything
+ * but the recycling logic will let us re-use
+ * the buffers allocated above
+ */
+ rx_rings[i].next_to_use = 0;
+ rx_rings[i].next_to_clean = 0;
+ rx_rings[i].next_to_alloc = 0;
+ /* do a struct copy */
*vsi->rx_rings[i] = rx_rings[i];
}
kfree(rx_rings);
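
Taken together, the i40e_set_ringparam hunks implement a clone-allocate-swap resize: build fully allocated clone rings first (pointing each clone's tail at a throwaway variable so the buffer allocator's tail write lands somewhere harmless), and replace the live rings only once every clone has succeeded. A compressed sketch of that shape, with a stand-in ring type and hypothetical alloc_ring()/free_ring() helpers:

    /* Sketch only: "struct ring" stands in for struct i40e_ring, and
     * alloc_ring()/free_ring() for the descriptor/buffer helpers.
     * free_ring() must tolerate a partially set up ring.
     */
    struct ring {
    	void *desc;
    	int count;
    	u32 *tail;
    	u16 next_to_use;
    };

    int alloc_ring(struct ring *r);	/* hypothetical helpers */
    void free_ring(struct ring *r);

    static int resize_rings(struct ring *live, int n, int new_count)
    {
    	struct ring *clone;
    	int i, err;

    	clone = kcalloc(n, sizeof(*clone), GFP_KERNEL);
    	if (!clone)
    		return -ENOMEM;

    	for (i = 0; i < n; i++) {
    		u32 faketail = 0;

    		clone[i] = live[i];		/* struct copy of settings */
    		clone[i].count = new_count;
    		clone[i].desc = NULL;		/* force a fresh alloc */
    		/* only valid during alloc_ring(); replaced below */
    		clone[i].tail = &faketail;
    		err = alloc_ring(&clone[i]);	/* descriptors + buffers */
    		if (err) {
    			do {			/* unwind clone[0..i] */
    				free_ring(&clone[i]);
    			} while (i--);
    			kfree(clone);
    			return err;
    		}
    	}

    	for (i = 0; i < n; i++) {
    		free_ring(&live[i]);		/* drop the old resources */
    		clone[i].tail = live[i].tail;	/* restore the real tail */
    		clone[i].next_to_use = 0;	/* let the recycler reuse */
    		live[i] = clone[i];		/* struct copy back */
    	}
    	kfree(clone);
    	return 0;
    }
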
@@ -1342,7 +1422,10 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
return I40E_VSI_STATS_LEN(netdev);
}
case ETH_SS_PRIV_FLAGS:
- return I40E_PRIV_FLAGS_STR_LEN;
+ if (pf->hw.pf_id == 0)
+ return I40E_PRIV_FLAGS_GL_STR_LEN;
+ else
+ return I40E_PRIV_FLAGS_STR_LEN;
default:
return -EOPNOTSUPP;
}
@@ -1540,10 +1623,18 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
/* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
break;
case ETH_SS_PRIV_FLAGS:
- for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
- memcpy(data, i40e_priv_flags_strings[i],
- ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
+ if (pf->hw.pf_id == 0) {
+ for (i = 0; i < I40E_PRIV_FLAGS_GL_STR_LEN; i++) {
+ memcpy(data, i40e_priv_flags_strings_gl[i],
+ ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ } else {
+ for (i = 0; i < I40E_PRIV_FLAGS_STR_LEN; i++) {
+ memcpy(data, i40e_priv_flags_strings[i],
+ ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
}
break;
default:
@@ -1714,7 +1805,7 @@ static void i40e_diag_test(struct net_device *netdev,
/* If the device is online then take it offline */
if (if_running)
/* indicate we're in test mode */
- dev_close(netdev);
+ i40e_close(netdev);
else
/* This reset does not affect link - if it is
* changed to a type of reset that does affect
@@ -1743,7 +1834,7 @@ static void i40e_diag_test(struct net_device *netdev,
i40e_do_reset(pf, BIT(__I40E_PF_RESET_REQUESTED));
if (if_running)
- dev_open(netdev);
+ i40e_open(netdev);
} else {
/* Online tests */
netif_info(pf, drv, netdev, "online testing starting\n");
@@ -1837,7 +1928,7 @@ static int i40e_set_phys_id(struct net_device *netdev,
if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY)) {
pf->led_status = i40e_led_get(hw);
} else {
- i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_PORT, NULL);
+ i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_ALL, NULL);
ret = i40e_led_get_phy(hw, &temp_status,
&pf->phy_led_val);
pf->led_status = temp_status;
@@ -2490,7 +2581,6 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
if (!vsi)
return -EINVAL;
-
pf = vsi->back;
if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
@@ -2548,15 +2638,18 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
if (ntohl(fsp->m_ext.data[1])) {
- if (ntohl(fsp->h_ext.data[1]) >= pf->num_alloc_vfs) {
- netif_info(pf, drv, vsi->netdev, "Invalid VF id\n");
+ vf_id = ntohl(fsp->h_ext.data[1]);
+ if (vf_id >= pf->num_alloc_vfs) {
+ netif_info(pf, drv, vsi->netdev,
+ "Invalid VF id %d\n", vf_id);
goto free_input;
}
- vf_id = ntohl(fsp->h_ext.data[1]);
/* Find vsi id from vf id and override dest vsi */
input->dest_vsi = pf->vf[vf_id].lan_vsi_id;
if (input->q_index >= pf->vf[vf_id].num_queue_pairs) {
- netif_info(pf, drv, vsi->netdev, "Invalid queue id\n");
+ netif_info(pf, drv, vsi->netdev,
+ "Invalid queue id %d for VF %d\n",
+ input->q_index, vf_id);
goto free_input;
}
}
@@ -2803,18 +2896,18 @@ static u32 i40e_get_priv_flags(struct net_device *dev)
struct i40e_pf *pf = vsi->back;
u32 ret_flags = 0;
- ret_flags |= pf->hw.func_caps.npar_enable ?
- I40E_PRIV_FLAGS_NPAR_FLAG : 0;
ret_flags |= pf->flags & I40E_FLAG_LINK_POLLING_ENABLED ?
I40E_PRIV_FLAGS_LINKPOLL_FLAG : 0;
ret_flags |= pf->flags & I40E_FLAG_FD_ATR_ENABLED ?
I40E_PRIV_FLAGS_FD_ATR : 0;
ret_flags |= pf->flags & I40E_FLAG_VEB_STATS_ENABLED ?
I40E_PRIV_FLAGS_VEB_STATS : 0;
- ret_flags |= pf->flags & I40E_FLAG_RX_PS_ENABLED ?
- I40E_PRIV_FLAGS_PS : 0;
ret_flags |= pf->auto_disable_flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE ?
0 : I40E_PRIV_FLAGS_HW_ATR_EVICT;
+ if (pf->hw.pf_id == 0) {
+ ret_flags |= pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT ?
+ I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT : 0;
+ }
return ret_flags;
}
@@ -2829,27 +2922,13 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
+ u16 sw_flags = 0, valid_flags = 0;
bool reset_required = false;
+ bool promisc_change = false;
+ int ret;
/* NOTE: MFP is not settable */
- /* allow the user to control the method of receive
- * buffer DMA, whether the packet is split at header
- * boundaries into two separate buffers. In some cases
- * one routine or the other will perform better.
- */
- if ((flags & I40E_PRIV_FLAGS_PS) &&
- !(pf->flags & I40E_FLAG_RX_PS_ENABLED)) {
- pf->flags |= I40E_FLAG_RX_PS_ENABLED;
- pf->flags &= ~I40E_FLAG_RX_1BUF_ENABLED;
- reset_required = true;
- } else if (!(flags & I40E_PRIV_FLAGS_PS) &&
- (pf->flags & I40E_FLAG_RX_PS_ENABLED)) {
- pf->flags &= ~I40E_FLAG_RX_PS_ENABLED;
- pf->flags |= I40E_FLAG_RX_1BUF_ENABLED;
- reset_required = true;
- }
-
if (flags & I40E_PRIV_FLAGS_LINKPOLL_FLAG)
pf->flags |= I40E_FLAG_LINK_POLLING_ENABLED;
else
@@ -2876,6 +2955,33 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
reset_required = true;
}
+ if (pf->hw.pf_id == 0) {
+ if ((flags & I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT) &&
+ !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
+ pf->flags |= I40E_FLAG_TRUE_PROMISC_SUPPORT;
+ promisc_change = true;
+ } else if (!(flags & I40E_PRIV_FLAGS_TRUE_PROMISC_SUPPORT) &&
+ (pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT)) {
+ pf->flags &= ~I40E_FLAG_TRUE_PROMISC_SUPPORT;
+ promisc_change = true;
+ }
+ }
+ if (promisc_change) {
+ if (!(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
+ sw_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
+ valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
+ ret = i40e_aq_set_switch_config(&pf->hw, sw_flags, valid_flags,
+ NULL);
+ if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
+ dev_info(&pf->pdev->dev,
+ "couldn't set switch config bits, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ /* not a fatal problem, just keep going */
+ }
+ }
+
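
The if/else pairs above are edge detection on a single private flag: act only when the requested state differs from the stored one. Written with XOR, the same test generalizes as flags accumulate; a small sketch (the flag macro is illustrative, not an i40e define):

    #define PRIV_TRUE_PROMISC	BIT(0)	/* illustrative flag bit */

    static bool apply_flag_change(u32 *stored, u32 requested)
    {
    	u32 changed = *stored ^ requested;

    	if (!(changed & PRIV_TRUE_PROMISC))
    		return false;			/* nothing to do */
    	*stored ^= PRIV_TRUE_PROMISC;		/* toggle to match request */
    	return true;				/* caller pushes new config */
    }
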
if ((flags & I40E_PRIV_FLAGS_HW_ATR_EVICT) &&
(pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))
pf->auto_disable_flags &= ~I40E_FLAG_HW_ATR_EVICT_CAPABLE;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
index 8ad162c16f61..58e6c1570335 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -38,16 +38,6 @@
#include "i40e_fcoe.h"
/**
- * i40e_rx_is_fcoe - returns true if the rx packet type is FCoE
- * @ptype: the packet type field from rx descriptor write-back
- **/
-static inline bool i40e_rx_is_fcoe(u16 ptype)
-{
- return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
- (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
-}
-
-/**
* i40e_fcoe_sof_is_class2 - returns true if this is a FC Class 2 SOF
* @sof: the FCoE start of frame delimiter
**/
@@ -1371,7 +1361,7 @@ static netdev_tx_t i40e_fcoe_xmit_frame(struct sk_buff *skb,
if (i40e_chk_linearize(skb, count)) {
if (__skb_linearize(skb))
goto out_drop;
- count = TXD_USE_COUNT(skb->len);
+ count = i40e_txd_use_count(skb->len);
tx_ring->tx_stats.tx_linearize++;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
index 5ebe12d56ebf..a7c7b1d9b7c8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
@@ -49,7 +49,7 @@ i40e_status i40e_add_sd_table_entry(struct i40e_hw *hw,
struct i40e_hmc_sd_entry *sd_entry;
bool dma_mem_alloc_done = false;
struct i40e_dma_mem mem;
- i40e_status ret_code;
+ i40e_status ret_code = I40E_SUCCESS;
u64 alloc_len;
if (NULL == hmc_info->sd_table.sd_entry) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 344912957cab..d0b3a1bb82ca 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -31,12 +31,7 @@
/* Local includes */
#include "i40e.h"
#include "i40e_diag.h"
-#if IS_ENABLED(CONFIG_VXLAN)
-#include <net/vxlan.h>
-#endif
-#if IS_ENABLED(CONFIG_GENEVE)
-#include <net/geneve.h>
-#endif
+#include <net/udp_tunnel.h>
const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
@@ -45,8 +40,8 @@ static const char i40e_driver_string[] =
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 1
-#define DRV_VERSION_MINOR 4
-#define DRV_VERSION_BUILD 25
+#define DRV_VERSION_MINOR 6
+#define DRV_VERSION_BUILD 11
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -90,6 +85,7 @@ static const struct pci_device_id i40e_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_I_X722), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
/* required last entry */
@@ -326,7 +322,7 @@ static void i40e_tx_timeout(struct net_device *netdev)
unsigned long trans_start;
q = netdev_get_tx_queue(netdev, i);
- trans_start = q->trans_start ? : netdev->trans_start;
+ trans_start = q->trans_start;
if (netif_xmit_stopped(q) &&
time_after(jiffies,
(trans_start + netdev->watchdog_timeo))) {
@@ -396,24 +392,6 @@ static void i40e_tx_timeout(struct net_device *netdev)
}
/**
- * i40e_release_rx_desc - Store the new tail and head values
- * @rx_ring: ring to bump
- * @val: new head index
- **/
-static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
-{
- rx_ring->next_to_use = val;
-
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch. (Only
- * applicable for weak-ordered memory model archs,
- * such as IA-64).
- */
- wmb();
- writel(val, rx_ring->tail);
-}
-
-/**
* i40e_get_vsi_stats_struct - Get System Network Statistics
* @vsi: the VSI we care about
*
@@ -1296,8 +1274,9 @@ int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
(is_vf == f->is_vf) &&
(is_netdev == f->is_netdev)) {
f->counter--;
- f->changed = true;
changed = 1;
+ if (f->counter == 0)
+ f->state = I40E_FILTER_REMOVE;
}
}
if (changed) {
@@ -1313,29 +1292,32 @@ int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
* @vsi: the PF Main VSI - inappropriate for any other VSI
* @macaddr: the MAC address
*
- * Some older firmware configurations set up a default promiscuous VLAN
- * filter that needs to be removed.
+ * Remove whatever filter the firmware set up so the driver can manage
+ * its own filtering intelligently.
**/
-static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
+static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
{
struct i40e_aqc_remove_macvlan_element_data element;
struct i40e_pf *pf = vsi->back;
- i40e_status ret;
/* Only appropriate for the PF main VSI */
if (vsi->type != I40E_VSI_MAIN)
- return -EINVAL;
+ return;
memset(&element, 0, sizeof(element));
ether_addr_copy(element.mac_addr, macaddr);
element.vlan_tag = 0;
- element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
- I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
- ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
- if (ret)
- return -ENOENT;
+ /* Ignore error returns, some firmware does it this way... */
+ element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
+ i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
- return 0;
+ memset(&element, 0, sizeof(element));
+ ether_addr_copy(element.mac_addr, macaddr);
+ element.vlan_tag = 0;
+ /* ...and some firmware does it this way. */
+ element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
+ I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+ i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
}
/**
@@ -1356,10 +1338,18 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
bool is_vf, bool is_netdev)
{
struct i40e_mac_filter *f;
+ int changed = false;
if (!vsi || !macaddr)
return NULL;
+ /* Do not allow broadcast filter to be added since broadcast filter
+ * is added as part of add VSI for any newly created VSI except
+ * FDIR VSI
+ */
+ if (is_broadcast_ether_addr(macaddr))
+ return NULL;
+
f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
if (!f) {
f = kzalloc(sizeof(*f), GFP_ATOMIC);
@@ -1368,8 +1358,15 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
ether_addr_copy(f->macaddr, macaddr);
f->vlan = vlan;
- f->changed = true;
-
+ /* If we're in overflow promisc mode, set the state directly
+ * to failed, so we don't bother to try sending the filter
+ * to the hardware.
+ */
+ if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state))
+ f->state = I40E_FILTER_FAILED;
+ else
+ f->state = I40E_FILTER_NEW;
+ changed = true;
INIT_LIST_HEAD(&f->list);
list_add_tail(&f->list, &vsi->mac_filter_list);
}
@@ -1389,10 +1386,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
f->counter++;
}
- /* changed tells sync_filters_subtask to
- * push the filter down to the firmware
- */
- if (f->changed) {
+ if (changed) {
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
}
@@ -1411,6 +1405,9 @@ add_filter_out:
*
* NOTE: This function is expected to be called with mac_filter_list_lock
* being held.
+ * ANOTHER NOTE: This function MUST be called from within the context of
+ * the "safe" variants of any list iterators, e.g. list_for_each_entry_safe()
+ * instead of list_for_each_entry().
**/
void i40e_del_filter(struct i40e_vsi *vsi,
u8 *macaddr, s16 vlan,
@@ -1450,9 +1447,18 @@ void i40e_del_filter(struct i40e_vsi *vsi,
* remove the filter from the firmware's list
*/
if (f->counter == 0) {
- f->changed = true;
- vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
- vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
+ if ((f->state == I40E_FILTER_FAILED) ||
+ (f->state == I40E_FILTER_NEW)) {
+ /* this one never got added by the FW. Just remove it,
+ * no need to sync anything.
+ */
+ list_del(&f->list);
+ kfree(f);
+ } else {
+ f->state = I40E_FILTER_REMOVE;
+ vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+ vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
+ }
}
}
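
These hunks replace the old f->changed boolean with a small per-filter state machine; NEW, ACTIVE, FAILED and REMOVE appear throughout the rest of the patch. The enum itself lives in i40e.h and is not part of this diff, so the shape below is an assumption, annotated with the transitions the hunks perform:

    /* Assumed shape of the state enum (defined in i40e.h, outside this
     * diff), with the transitions these hunks imply:
     *
     *   i40e_add_filter:   (none)     -> NEW     normally
     *                      (none)     -> FAILED  under overflow promisc
     *   i40e_del_filter:   NEW/FAILED -> freed   (never reached the FW)
     *                      ACTIVE     -> REMOVE  (needs an AQ delete)
     *   i40e_sync_vsi_filters:
     *                      NEW        -> ACTIVE  on AQ success
     *                      NEW        -> FAILED  on ENOSPC or AQ error
     *                      REMOVE     -> freed   after the AQ delete
     */
    enum i40e_filter_state {
    	I40E_FILTER_INVALID = 0,
    	I40E_FILTER_NEW,	/* not yet pushed to firmware */
    	I40E_FILTER_ACTIVE,	/* accepted by firmware */
    	I40E_FILTER_FAILED,	/* rejected by firmware */
    	I40E_FILTER_REMOVE,	/* queued for an AQ delete */
    };
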
@@ -1474,7 +1480,6 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
struct sockaddr *addr = p;
- struct i40e_mac_filter *f;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
@@ -1495,52 +1500,23 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
else
netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
+ spin_lock_bh(&vsi->mac_filter_list_lock);
+ i40e_del_mac_all_vlan(vsi, netdev->dev_addr, false, true);
+ i40e_put_mac_in_vlan(vsi, addr->sa_data, false, true);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+ ether_addr_copy(netdev->dev_addr, addr->sa_data);
if (vsi->type == I40E_VSI_MAIN) {
i40e_status ret;
ret = i40e_aq_mac_address_write(&vsi->back->hw,
I40E_AQC_WRITE_TYPE_LAA_WOL,
addr->sa_data, NULL);
- if (ret) {
- netdev_info(netdev,
- "Addr change for Main VSI failed: %d\n",
- ret);
- return -EADDRNOTAVAIL;
- }
- }
-
- if (ether_addr_equal(netdev->dev_addr, hw->mac.addr)) {
- struct i40e_aqc_remove_macvlan_element_data element;
-
- memset(&element, 0, sizeof(element));
- ether_addr_copy(element.mac_addr, netdev->dev_addr);
- element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
- i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
- } else {
- spin_lock_bh(&vsi->mac_filter_list_lock);
- i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
- false, false);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
- }
-
- if (ether_addr_equal(addr->sa_data, hw->mac.addr)) {
- struct i40e_aqc_add_macvlan_element_data element;
-
- memset(&element, 0, sizeof(element));
- ether_addr_copy(element.mac_addr, hw->mac.addr);
- element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
- i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
- } else {
- spin_lock_bh(&vsi->mac_filter_list_lock);
- f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
- false, false);
- if (f)
- f->is_laa = true;
- spin_unlock_bh(&vsi->mac_filter_list_lock);
+ if (ret)
+ netdev_info(netdev, "Ignoring error from firmware on LAA update, status %s, AQ ret %s\n",
+ i40e_stat_str(hw, ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
}
- ether_addr_copy(netdev->dev_addr, addr->sa_data);
-
/* schedule our worker thread which will take care of
* applying the new filter changes
*/
@@ -1600,14 +1576,8 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
vsi->tc_config.numtc = numtc;
vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
/* Number of queues per enabled TC */
- /* In MFP case we can have a much lower count of MSIx
- * vectors available and so we need to lower the used
- * q count.
- */
- if (pf->flags & I40E_FLAG_MSIX_ENABLED)
- qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
- else
- qcount = vsi->alloc_queue_pairs;
+ qcount = vsi->alloc_queue_pairs;
+
num_tc_qps = qcount / numtc;
num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));
@@ -1777,28 +1747,6 @@ bottom_of_search_loop:
}
/**
- * i40e_mac_filter_entry_clone - Clones a MAC filter entry
- * @src: source MAC filter entry to be clones
- *
- * Returns the pointer to newly cloned MAC filter entry or NULL
- * in case of error
- **/
-static struct i40e_mac_filter *i40e_mac_filter_entry_clone(
- struct i40e_mac_filter *src)
-{
- struct i40e_mac_filter *f;
-
- f = kzalloc(sizeof(*f), GFP_ATOMIC);
- if (!f)
- return NULL;
- *f = *src;
-
- INIT_LIST_HEAD(&f->list);
-
- return f;
-}
-
-/**
* i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
* @vsi: pointer to vsi struct
* @from: Pointer to list which contains MAC filter entries - changes to
@@ -1812,41 +1760,61 @@ static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
struct i40e_mac_filter *f, *ftmp;
list_for_each_entry_safe(f, ftmp, from, list) {
- f->changed = true;
/* Move the element back into MAC filter list*/
list_move_tail(&f->list, &vsi->mac_filter_list);
}
}
/**
- * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
- * @vsi: pointer to vsi struct
+ * i40e_update_filter_state - Update filter state based on return data
+ * from firmware
+ * @count: Number of filters added
+ * @add_list: return data from fw
+ * @add_head: pointer to the first filter in the current batch
+ * @aq_err: status from fw
*
- * MAC filter entries from list were slated to be added from device.
+ * MAC filter entries from list were slated to be added to device. Returns
+ * the number of successfully added filters. Note that 0 does NOT mean success!
**/
-static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi)
+static int
+i40e_update_filter_state(int count,
+ struct i40e_aqc_add_macvlan_element_data *add_list,
+ struct i40e_mac_filter *add_head, int aq_err)
{
- struct i40e_mac_filter *f, *ftmp;
+ int retval = 0;
+ int i;
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
- if (!f->changed && f->counter)
- f->changed = true;
- }
-}
-/**
- * i40e_cleanup_add_list - Deletes the element from add list and release
- * memory
- * @add_list: Pointer to list which contains MAC filter entries
- **/
-static void i40e_cleanup_add_list(struct list_head *add_list)
-{
- struct i40e_mac_filter *f, *ftmp;
-
- list_for_each_entry_safe(f, ftmp, add_list, list) {
- list_del(&f->list);
- kfree(f);
+ if (!aq_err) {
+ retval = count;
+ /* Everything's good, mark all filters active. */
+ for (i = 0; i < count; i++) {
+ add_head->state = I40E_FILTER_ACTIVE;
+ add_head = list_next_entry(add_head, list);
+ }
+ } else if (aq_err == I40E_AQ_RC_ENOSPC) {
+ /* Device ran out of filter space. Check the return value
+ * for each filter to see which ones are active.
+ */
+ for (i = 0; i < count; i++) {
+ if (add_list[i].match_method ==
+ I40E_AQC_MM_ERR_NO_RES) {
+ add_head->state = I40E_FILTER_FAILED;
+ } else {
+ add_head->state = I40E_FILTER_ACTIVE;
+ retval++;
+ }
+ add_head = list_next_entry(add_head, list);
+ }
+ } else {
+ /* Some other horrible thing happened, fail all filters */
+ retval = 0;
+ for (i = 0; i < count; i++) {
+ add_head->state = I40E_FILTER_FAILED;
+ add_head = list_next_entry(add_head, list);
+ }
}
+ return retval;
}
/**
@@ -1859,20 +1827,22 @@ static void i40e_cleanup_add_list(struct list_head *add_list)
**/
int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
{
- struct list_head tmp_del_list, tmp_add_list;
- struct i40e_mac_filter *f, *ftmp, *fclone;
- bool promisc_forced_on = false;
- bool add_happened = false;
+ struct i40e_mac_filter *f, *ftmp, *add_head = NULL;
+ struct list_head tmp_add_list, tmp_del_list;
+ struct i40e_hw *hw = &vsi->back->hw;
+ bool promisc_changed = false;
+ char vsi_name[16] = "PF";
int filter_list_len = 0;
u32 changed_flags = 0;
i40e_status aq_ret = 0;
- bool err_cond = false;
int retval = 0;
struct i40e_pf *pf;
int num_add = 0;
int num_del = 0;
int aq_err = 0;
u16 cmd_flags;
+ int list_size;
+ int fcnt;
/* empty array typed pointers, kcalloc later */
struct i40e_aqc_add_macvlan_element_data *add_list;
@@ -1887,72 +1857,46 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
vsi->current_netdev_flags = vsi->netdev->flags;
}
- INIT_LIST_HEAD(&tmp_del_list);
INIT_LIST_HEAD(&tmp_add_list);
+ INIT_LIST_HEAD(&tmp_del_list);
+
+ if (vsi->type == I40E_VSI_SRIOV)
+ snprintf(vsi_name, sizeof(vsi_name) - 1, "VF %d", vsi->vf_id);
+ else if (vsi->type != I40E_VSI_MAIN)
+ snprintf(vsi_name, sizeof(vsi_name) - 1, "vsi %d", vsi->seid);
if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
spin_lock_bh(&vsi->mac_filter_list_lock);
+ /* Create a list of filters to delete. */
list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
- if (!f->changed)
- continue;
-
- if (f->counter != 0)
- continue;
- f->changed = false;
-
- /* Move the element into temporary del_list */
- list_move_tail(&f->list, &tmp_del_list);
- }
-
- list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
- if (!f->changed)
- continue;
-
- if (f->counter == 0)
- continue;
- f->changed = false;
-
- /* Clone MAC filter entry and add into temporary list */
- fclone = i40e_mac_filter_entry_clone(f);
- if (!fclone) {
- err_cond = true;
- break;
+ if (f->state == I40E_FILTER_REMOVE) {
+ WARN_ON(f->counter != 0);
+ /* Move the element into temporary del_list */
+ list_move_tail(&f->list, &tmp_del_list);
+ vsi->active_filters--;
+ }
+ if (f->state == I40E_FILTER_NEW) {
+ WARN_ON(f->counter == 0);
+ /* Move the element into temporary add_list */
+ list_move_tail(&f->list, &tmp_add_list);
}
- list_add_tail(&fclone->list, &tmp_add_list);
- }
-
- /* if failed to clone MAC filter entry - undo */
- if (err_cond) {
- i40e_undo_del_filter_entries(vsi, &tmp_del_list);
- i40e_undo_add_filter_entries(vsi);
}
spin_unlock_bh(&vsi->mac_filter_list_lock);
-
- if (err_cond) {
- i40e_cleanup_add_list(&tmp_add_list);
- retval = -ENOMEM;
- goto out;
- }
}
/* Now process 'del_list' outside the lock */
if (!list_empty(&tmp_del_list)) {
- int del_list_size;
-
- filter_list_len = pf->hw.aq.asq_buf_size /
+ filter_list_len = hw->aq.asq_buf_size /
sizeof(struct i40e_aqc_remove_macvlan_element_data);
- del_list_size = filter_list_len *
+ list_size = filter_list_len *
sizeof(struct i40e_aqc_remove_macvlan_element_data);
- del_list = kzalloc(del_list_size, GFP_ATOMIC);
+ del_list = kzalloc(list_size, GFP_ATOMIC);
if (!del_list) {
- i40e_cleanup_add_list(&tmp_add_list);
-
/* Undo VSI's MAC filter entry element updates */
spin_lock_bh(&vsi->mac_filter_list_lock);
i40e_undo_del_filter_entries(vsi, &tmp_del_list);
- i40e_undo_add_filter_entries(vsi);
spin_unlock_bh(&vsi->mac_filter_list_lock);
retval = -ENOMEM;
goto out;
@@ -1963,9 +1907,13 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
/* add to delete list */
ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
- del_list[num_del].vlan_tag =
- cpu_to_le16((u16)(f->vlan ==
- I40E_VLAN_ANY ? 0 : f->vlan));
+ if (f->vlan == I40E_VLAN_ANY) {
+ del_list[num_del].vlan_tag = 0;
+ cmd_flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+ } else {
+ del_list[num_del].vlan_tag =
+ cpu_to_le16((u16)(f->vlan));
+ }
cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
del_list[num_del].flags = cmd_flags;
@@ -1973,21 +1921,23 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
/* flush a full buffer */
if (num_del == filter_list_len) {
- aq_ret = i40e_aq_remove_macvlan(&pf->hw,
- vsi->seid,
+ aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid,
del_list,
- num_del,
- NULL);
- aq_err = pf->hw.aq.asq_last_status;
+ num_del, NULL);
+ aq_err = hw->aq.asq_last_status;
num_del = 0;
- memset(del_list, 0, del_list_size);
+ memset(del_list, 0, list_size);
- if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) {
+ /* Explicitly ignore and do not report when
+ * firmware returns ENOENT.
+ */
+ if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) {
retval = -EIO;
- dev_err(&pf->pdev->dev,
- "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n",
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
+ dev_info(&pf->pdev->dev,
+ "ignoring delete macvlan error on %s, err %s, aq_err %s\n",
+ vsi_name,
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw, aq_err));
}
}
/* Release memory for MAC filter entries which were
@@ -1998,17 +1948,22 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
}
if (num_del) {
- aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
- del_list, num_del,
- NULL);
- aq_err = pf->hw.aq.asq_last_status;
+ aq_ret = i40e_aq_remove_macvlan(hw, vsi->seid, del_list,
+ num_del, NULL);
+ aq_err = hw->aq.asq_last_status;
num_del = 0;
- if (aq_ret && aq_err != I40E_AQ_RC_ENOENT)
+ /* Explicitly ignore and do not report when firmware
+ * returns ENOENT.
+ */
+ if (aq_ret && aq_err != I40E_AQ_RC_ENOENT) {
+ retval = -EIO;
dev_info(&pf->pdev->dev,
- "ignoring delete macvlan error, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
+ "ignoring delete macvlan error on %s, err %s aq_err %s\n",
+ vsi_name,
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw, aq_err));
+ }
}
kfree(del_list);
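
Both this delete path and the add path below follow one batching discipline: size the element array to what a single AdminQ buffer holds, append until full, flush and memset, then flush whatever remains. The skeleton, with fill_element()/submit_batch() as hypothetical stand-ins for the macvlan element setup and the i40e_aq_*_macvlan calls:

    /* Skeleton of the flush-when-full batching used here; the helpers
     * and the list element type are hypothetical stand-ins.
     */
    struct filter_item { struct list_head list; /* ... */ };
    void fill_element(void *buf, int idx, struct filter_item *it);
    int submit_batch(void *buf, int count);

    static int sync_batched(struct list_head *items, void *buf,
    			size_t elem_size, int batch_len)
    {
    	struct filter_item *it;
    	int num = 0, err = 0;

    	list_for_each_entry(it, items, list) {
    		fill_element(buf, num++, it);
    		if (num == batch_len) {		/* flush a full buffer */
    			err = submit_batch(buf, num);
    			memset(buf, 0, elem_size * batch_len);
    			num = 0;
    			if (err)
    				return err;
    		}
    	}
    	if (num)				/* flush the remainder */
    		err = submit_batch(buf, num);
    	return err;
    }
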
@@ -2016,87 +1971,126 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
}
if (!list_empty(&tmp_add_list)) {
- int add_list_size;
-
- /* do all the adds now */
- filter_list_len = pf->hw.aq.asq_buf_size /
- sizeof(struct i40e_aqc_add_macvlan_element_data),
- add_list_size = filter_list_len *
+ /* Do all the adds now. */
+ filter_list_len = hw->aq.asq_buf_size /
sizeof(struct i40e_aqc_add_macvlan_element_data);
- add_list = kzalloc(add_list_size, GFP_ATOMIC);
+ list_size = filter_list_len *
+ sizeof(struct i40e_aqc_add_macvlan_element_data);
+ add_list = kzalloc(list_size, GFP_ATOMIC);
if (!add_list) {
- /* Purge element from temporary lists */
- i40e_cleanup_add_list(&tmp_add_list);
-
- /* Undo add filter entries from VSI MAC filter list */
- spin_lock_bh(&vsi->mac_filter_list_lock);
- i40e_undo_add_filter_entries(vsi);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
retval = -ENOMEM;
goto out;
}
-
- list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) {
-
- add_happened = true;
- cmd_flags = 0;
-
+ num_add = 0;
+ list_for_each_entry(f, &tmp_add_list, list) {
+ if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
+ &vsi->state)) {
+ f->state = I40E_FILTER_FAILED;
+ continue;
+ }
/* add to add array */
+ if (num_add == 0)
+ add_head = f;
+ cmd_flags = 0;
ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
- add_list[num_add].vlan_tag =
- cpu_to_le16(
- (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
+ if (f->vlan == I40E_VLAN_ANY) {
+ add_list[num_add].vlan_tag = 0;
+ cmd_flags |= I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
+ } else {
+ add_list[num_add].vlan_tag =
+ cpu_to_le16((u16)(f->vlan));
+ }
add_list[num_add].queue_number = 0;
-
cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
add_list[num_add].flags = cpu_to_le16(cmd_flags);
num_add++;
/* flush a full buffer */
if (num_add == filter_list_len) {
- aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+ aq_ret = i40e_aq_add_macvlan(hw, vsi->seid,
add_list, num_add,
NULL);
- aq_err = pf->hw.aq.asq_last_status;
+ aq_err = hw->aq.asq_last_status;
+ fcnt = i40e_update_filter_state(num_add,
+ add_list,
+ add_head,
+ aq_ret);
+ vsi->active_filters += fcnt;
+
+ if (fcnt != num_add) {
+ promisc_changed = true;
+ set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
+ &vsi->state);
+ vsi->promisc_threshold =
+ (vsi->active_filters * 3) / 4;
+ dev_warn(&pf->pdev->dev,
+ "Error %s adding RX filters on %s, promiscuous mode forced on\n",
+ i40e_aq_str(hw, aq_err),
+ vsi_name);
+ }
+ memset(add_list, 0, list_size);
num_add = 0;
-
- if (aq_ret)
- break;
- memset(add_list, 0, add_list_size);
}
- /* Entries from tmp_add_list were cloned from MAC
- * filter list, hence clean those cloned entries
- */
- list_del(&f->list);
- kfree(f);
}
-
if (num_add) {
- aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
+ aq_ret = i40e_aq_add_macvlan(hw, vsi->seid,
add_list, num_add, NULL);
- aq_err = pf->hw.aq.asq_last_status;
- num_add = 0;
+ aq_err = hw->aq.asq_last_status;
+ fcnt = i40e_update_filter_state(num_add, add_list,
+ add_head, aq_ret);
+ vsi->active_filters += fcnt;
+ if (fcnt != num_add) {
+ promisc_changed = true;
+ set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
+ &vsi->state);
+ vsi->promisc_threshold =
+ (vsi->active_filters * 3) / 4;
+ dev_warn(&pf->pdev->dev,
+ "Error %s adding RX filters on %s, promiscuous mode forced on\n",
+ i40e_aq_str(hw, aq_err), vsi_name);
+ }
}
+ /* Now move all of the filters from the temp add list back to
+ * the VSI's list.
+ */
+ spin_lock_bh(&vsi->mac_filter_list_lock);
+ list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) {
+ list_move_tail(&f->list, &vsi->mac_filter_list);
+ }
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
kfree(add_list);
add_list = NULL;
+ }
- if (add_happened && aq_ret && aq_err != I40E_AQ_RC_EINVAL) {
- retval = i40e_aq_rc_to_posix(aq_ret, aq_err);
+ /* Check to see if we can drop out of overflow promiscuous mode. */
+ if (test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state) &&
+ (vsi->active_filters < vsi->promisc_threshold)) {
+ int failed_count = 0;
+ /* See if we have any failed filters. We can't drop out of
+ * promiscuous until these have all been deleted.
+ */
+ spin_lock_bh(&vsi->mac_filter_list_lock);
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ if (f->state == I40E_FILTER_FAILED)
+ failed_count++;
+ }
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
+ if (!failed_count) {
dev_info(&pf->pdev->dev,
- "add filter failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw, aq_err));
- if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
- !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
- &vsi->state)) {
- promisc_forced_on = true;
- set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
- &vsi->state);
- dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
- }
+ "filter logjam cleared on %s, leaving overflow promiscuous mode\n",
+ vsi_name);
+ clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
+ promisc_changed = true;
+ vsi->promisc_threshold = 0;
}
}
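
The block above is the exit half of a hysteresis: entry (in the add path) pins promisc_threshold at three quarters of the filter count that made it into hardware, and exit requires both dropping below that threshold and having zero FAILED filters left. With 224 active filters at overflow time, for example, the threshold is 168, so a handful of deletions will not bounce the VSI in and out of promiscuous mode. The rule, condensed:

    /* Condensed enter/exit rule; field names mirror the hunks, but this
     * is a sketch, not the full driver state handling.
     */
    struct vsi_promisc {
    	bool overflowed;
    	int active_filters;
    	int promisc_threshold;
    };

    static void on_add_overflow(struct vsi_promisc *v)
    {
    	v->overflowed = true;
    	v->promisc_threshold = (v->active_filters * 3) / 4;
    }

    static bool may_leave_overflow(const struct vsi_promisc *v, int failed)
    {
    	return v->overflowed && failed == 0 &&
    	       v->active_filters < v->promisc_threshold;
    }
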
+ /* if the VF is not trusted, do not allow promiscuous mode */
+ if ((vsi->type == I40E_VSI_SRIOV) && !pf->vf[vsi->vf_id].trusted) {
+ clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
+ goto out;
+ }
+
/* check for changes in promiscuous modes */
if (changed_flags & IFF_ALLMULTI) {
bool cur_multipromisc;
@@ -2108,15 +2102,17 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
NULL);
if (aq_ret) {
retval = i40e_aq_rc_to_posix(aq_ret,
- pf->hw.aq.asq_last_status);
+ hw->aq.asq_last_status);
dev_info(&pf->pdev->dev,
- "set multi promisc failed, err %s aq_err %s\n",
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ "set multi promisc failed on %s, err %s aq_err %s\n",
+ vsi_name,
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
}
}
- if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
+ if ((changed_flags & IFF_PROMISC) ||
+ (promisc_changed &&
+ test_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state))) {
bool cur_promisc;
cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
@@ -2132,32 +2128,58 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
*/
if (pf->cur_promisc != cur_promisc) {
pf->cur_promisc = cur_promisc;
- set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+ if (cur_promisc)
+ aq_ret =
+ i40e_aq_set_default_vsi(hw,
+ vsi->seid,
+ NULL);
+ else
+ aq_ret =
+ i40e_aq_clear_default_vsi(hw,
+ vsi->seid,
+ NULL);
+ if (aq_ret) {
+ retval = i40e_aq_rc_to_posix(aq_ret,
+ hw->aq.asq_last_status);
+ dev_info(&pf->pdev->dev,
+ "Set default VSI failed on %s, err %s, aq_err %s\n",
+ vsi_name,
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw,
+ hw->aq.asq_last_status));
+ }
}
} else {
aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
- &vsi->back->hw,
+ hw,
vsi->seid,
- cur_promisc, NULL);
+ cur_promisc, NULL,
+ true);
if (aq_ret) {
retval =
i40e_aq_rc_to_posix(aq_ret,
- pf->hw.aq.asq_last_status);
+ hw->aq.asq_last_status);
dev_info(&pf->pdev->dev,
- "set unicast promisc failed, err %d, aq_err %d\n",
- aq_ret, pf->hw.aq.asq_last_status);
+ "set unicast promisc failed on %s, err %s, aq_err %s\n",
+ vsi_name,
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw,
+ hw->aq.asq_last_status));
}
aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
- &vsi->back->hw,
+ hw,
vsi->seid,
cur_promisc, NULL);
if (aq_ret) {
retval =
i40e_aq_rc_to_posix(aq_ret,
- pf->hw.aq.asq_last_status);
+ hw->aq.asq_last_status);
dev_info(&pf->pdev->dev,
- "set multicast promisc failed, err %d, aq_err %d\n",
- aq_ret, pf->hw.aq.asq_last_status);
+ "set multicast promisc failed on %s, err %s, aq_err %s\n",
+ vsi_name,
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw,
+ hw->aq.asq_last_status));
}
}
aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
@@ -2168,9 +2190,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
pf->hw.aq.asq_last_status);
dev_info(&pf->pdev->dev,
"set brdcast promisc failed, err %s, aq_err %s\n",
- i40e_stat_str(&pf->hw, aq_ret),
- i40e_aq_str(&pf->hw,
- pf->hw.aq.asq_last_status));
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw,
+ hw->aq.asq_last_status));
}
}
out:
@@ -2339,7 +2361,7 @@ static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
**/
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
{
- struct i40e_mac_filter *f, *add_f;
+ struct i40e_mac_filter *f, *ftmp, *add_f;
bool is_netdev, is_vf;
is_vf = (vsi->type == I40E_VSI_SRIOV);
@@ -2360,7 +2382,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
}
}
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
if (!add_f) {
dev_info(&vsi->back->pdev->dev,
@@ -2374,7 +2396,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
/* Now if we add a vlan tag, make sure to check if it is the first
* tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag"
* with 0, so we now accept untagged and specified tagged traffic
- * (and not any taged and untagged)
+ * (and not all tags along with untagged)
*/
if (vid > 0) {
if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
@@ -2396,7 +2418,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
/* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
if (vid > 0 && !vsi->info.pvid) {
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
if (!i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
is_vf, is_netdev))
continue;
@@ -2433,7 +2455,7 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
{
struct net_device *netdev = vsi->netdev;
- struct i40e_mac_filter *f, *add_f;
+ struct i40e_mac_filter *f, *ftmp, *add_f;
bool is_vf, is_netdev;
int filter_count = 0;
@@ -2446,7 +2468,7 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
if (is_netdev)
i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);
- list_for_each_entry(f, &vsi->mac_filter_list, list)
+ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
/* go through all the filters for this VSI and if there is only
@@ -2479,7 +2501,7 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
}
if (!filter_count) {
- list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
is_vf, is_netdev);
@@ -2524,8 +2546,6 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev,
if (vid > 4095)
return -EINVAL;
- netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);
-
/* If the network stack called us with vid = 0 then
* it is asking to receive priority tagged packets with
* vlan id 0. Our HW receives them by default when configured
@@ -2559,8 +2579,6 @@ static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
- netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);
-
/* return code is ignored as there is nothing a user
* can do about failure to remove and a log message was
* already printed from the other function
@@ -2573,6 +2591,44 @@ static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
}
/**
+ * i40e_macaddr_init - explicitly write the mac address filters
+ *
+ * @vsi: pointer to the vsi
+ * @macaddr: the MAC address
+ *
+ * This is needed when the macaddr has been obtained by other
+ * means than the default, e.g., from Open Firmware or IDPROM.
+ * Returns 0 on success, negative on failure
+ **/
+static int i40e_macaddr_init(struct i40e_vsi *vsi, u8 *macaddr)
+{
+ int ret;
+ struct i40e_aqc_add_macvlan_element_data element;
+
+ ret = i40e_aq_mac_address_write(&vsi->back->hw,
+ I40E_AQC_WRITE_TYPE_LAA_WOL,
+ macaddr, NULL);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "Addr change for VSI failed: %d\n", ret);
+ return -EADDRNOTAVAIL;
+ }
+
+ memset(&element, 0, sizeof(element));
+ ether_addr_copy(element.mac_addr, macaddr);
+ element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
+ ret = i40e_aq_add_macvlan(&vsi->back->hw, vsi->seid, &element, 1, NULL);
+ if (ret) {
+ dev_info(&vsi->back->pdev->dev,
+ "add filter failed err %s aq_err %s\n",
+ i40e_stat_str(&vsi->back->hw, ret),
+ i40e_aq_str(&vsi->back->hw,
+ vsi->back->hw.aq.asq_last_status));
+ }
+ return ret;
+}
+
+/**
* i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
* @vsi: the vsi being brought back up
**/
@@ -2865,34 +2921,21 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
memset(&rx_ctx, 0, sizeof(rx_ctx));
ring->rx_buf_len = vsi->rx_buf_len;
- ring->rx_hdr_len = vsi->rx_hdr_len;
rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
- rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;
rx_ctx.base = (ring->dma / 128);
rx_ctx.qlen = ring->count;
- if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) {
- set_ring_16byte_desc_enabled(ring);
- rx_ctx.dsize = 0;
- } else {
- rx_ctx.dsize = 1;
- }
+ /* use 32 byte descriptors */
+ rx_ctx.dsize = 1;
- rx_ctx.dtype = vsi->dtype;
- if (vsi->dtype) {
- set_ring_ps_enabled(ring);
- rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2 |
- I40E_RX_SPLIT_IP |
- I40E_RX_SPLIT_TCP_UDP |
- I40E_RX_SPLIT_SCTP;
- } else {
- rx_ctx.hsplit_0 = 0;
- }
+ /* descriptor type is always zero
+ * rx_ctx.dtype = 0;
+ */
+ rx_ctx.hsplit_0 = 0;
- rx_ctx.rxmax = min_t(u16, vsi->max_frame,
- (chain_len * ring->rx_buf_len));
+ rx_ctx.rxmax = min_t(u16, vsi->max_frame, chain_len * ring->rx_buf_len);
if (hw->revision_id == 0)
rx_ctx.lrxqthresh = 0;
else
@@ -2929,12 +2972,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
writel(0, ring->tail);
- if (ring_is_ps_enabled(ring)) {
- i40e_alloc_rx_headers(ring);
- i40e_alloc_rx_buffers_ps(ring, I40E_DESC_UNUSED(ring));
- } else {
- i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring));
- }
+ i40e_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
return 0;
}
@@ -2973,40 +3011,18 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
else
vsi->max_frame = I40E_RXBUFFER_2048;
- /* figure out correct receive buffer length */
- switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED |
- I40E_FLAG_RX_PS_ENABLED)) {
- case I40E_FLAG_RX_1BUF_ENABLED:
- vsi->rx_hdr_len = 0;
- vsi->rx_buf_len = vsi->max_frame;
- vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
- break;
- case I40E_FLAG_RX_PS_ENABLED:
- vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
- vsi->rx_buf_len = I40E_RXBUFFER_2048;
- vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT;
- break;
- default:
- vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
- vsi->rx_buf_len = I40E_RXBUFFER_2048;
- vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS;
- break;
- }
+ vsi->rx_buf_len = I40E_RXBUFFER_2048;
#ifdef I40E_FCOE
/* setup rx buffer for FCoE */
if ((vsi->type == I40E_VSI_FCOE) &&
(vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
- vsi->rx_hdr_len = 0;
vsi->rx_buf_len = I40E_RXBUFFER_3072;
vsi->max_frame = I40E_RXBUFFER_3072;
- vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
}
#endif /* I40E_FCOE */
/* round up for the chip's needs */
- vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
- BIT_ULL(I40E_RXQ_CTX_HBUFF_SHIFT));
vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
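
The ALIGN() rounds the buffer length up to what the Rx queue context can encode: dbuff is programmed in units of BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT) bytes, i.e. 128-byte steps if the shift is 7 (an assumption consistent with the `>> I40E_RXQ_CTX_DBUFF_SHIFT` in the ring-context hunk earlier). A worked example:

    /* Assuming DBUFF_SHIFT == 7, so lengths are encoded in 128 B units. */
    #define DBUFF_SHIFT	7

    static void dbuff_example(void)
    {
    	u16 buf_len = ALIGN(3000, BIT_ULL(DBUFF_SHIFT));	/* -> 3072 */
    	u8 dbuff = buf_len >> DBUFF_SHIFT;			/* -> 24 */

    	/* the MAC honors dbuff << 7 == 3072 bytes per buffer */
    }
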
@@ -3058,8 +3074,19 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
**/
static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
{
+ struct i40e_pf *pf = vsi->back;
+ int err;
+
if (vsi->netdev)
i40e_set_rx_mode(vsi->netdev);
+
+ if (pf->flags & I40E_FLAG_PF_MAC) {
+ err = i40e_macaddr_init(vsi, pf->hw.mac.addr);
+ if (err) {
+ dev_warn(&pf->pdev->dev,
+ "could not set up macaddr; err %d\n", err);
+ }
+ }
}
/**
@@ -4001,6 +4028,7 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
/* clear the affinity_mask in the IRQ descriptor */
irq_set_affinity_hint(pf->msix_entries[vector].vector,
NULL);
+ synchronize_irq(pf->msix_entries[vector].vector);
free_irq(pf->msix_entries[vector].vector,
vsi->q_vectors[i]);
@@ -4164,7 +4192,7 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
int i;
i40e_stop_misc_vector(pf);
- if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
+ if (pf->flags & I40E_FLAG_MSIX_ENABLED && pf->msix_entries) {
synchronize_irq(pf->msix_entries[0].vector);
free_irq(pf->msix_entries[0].vector, pf);
}
@@ -4526,23 +4554,38 @@ static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
**/
static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
{
+ int i, tc_unused = 0;
u8 num_tc = 0;
- int i;
+ u8 ret = 0;
/* Scan the ETS Config Priority Table to find
* traffic class enabled for a given priority
- * and use the traffic class index to get the
- * number of traffic classes enabled
+ * and create a bitmask of enabled TCs
*/
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- if (dcbcfg->etscfg.prioritytable[i] > num_tc)
- num_tc = dcbcfg->etscfg.prioritytable[i];
- }
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
+ num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
- /* Traffic class index starts from zero so
- * increment to return the actual count
+ /* Now scan the bitmask to check for
+ * contiguous TCs starting with TC0
*/
- return num_tc + 1;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (num_tc & BIT(i)) {
+ if (!tc_unused) {
+ ret++;
+ } else {
+ pr_err("Non-contiguous TC - Disabling DCB\n");
+ return 1;
+ }
+ } else {
+ tc_unused = 1;
+ }
+ }
+
+ /* There is always at least TC0 */
+ if (!ret)
+ ret = 1;
+
+ return ret;
}
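
The rewrite builds a TC bitmask from the priority table and then requires the set bits to be contiguous from TC0: a table of {0,0,1,1,2,0,0,0} gives mask 0b111 and three TCs, while {0,0,3,0,0,0,0,0} gives 0b1001, which is rejected and DCB collapses to a single TC. The check, condensed into one standalone helper:

    /* Condensed form of the contiguity check in this hunk. */
    static u8 num_contiguous_tcs(const u8 *prio_table, int nprio, int max_tc)
    {
    	int i, tc_unused = 0;
    	u8 mask = 0, ret = 0;

    	for (i = 0; i < nprio; i++)
    		mask |= BIT(prio_table[i]);	/* enabled-TC bitmask */

    	for (i = 0; i < max_tc; i++) {
    		if (!(mask & BIT(i))) {
    			tc_unused = 1;		/* gap seen */
    			continue;
    		}
    		if (tc_unused)
    			return 1;		/* non-contiguous: one TC */
    		ret++;
    	}
    	return ret ? ret : 1;			/* always at least TC0 */
    }
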
/**
@@ -5007,7 +5050,6 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
if (pf->vsi[v]->netdev)
i40e_dcbnl_set_all(pf->vsi[v]);
}
- i40e_notify_client_of_l2_param_changes(pf->vsi[v]);
}
}
@@ -5071,9 +5113,13 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
DCB_CAP_DCBX_VER_IEEE;
pf->flags |= I40E_FLAG_DCB_CAPABLE;
- /* Enable DCB tagging only when more than one TC */
+ /* Enable DCB tagging only when more than one TC
+ * or explicitly disable if only one TC
+ */
if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
pf->flags |= I40E_FLAG_DCB_ENABLED;
+ else
+ pf->flags &= ~I40E_FLAG_DCB_ENABLED;
dev_dbg(&pf->pdev->dev,
"DCBX offload is supported for this PF.\n");
}
@@ -5232,12 +5278,6 @@ static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
usleep_range(1000, 2000);
i40e_down(vsi);
- /* Give a VF some time to respond to the reset. The
- * two second wait is based upon the watchdog cycle in
- * the VF driver.
- */
- if (vsi->type == I40E_VSI_SRIOV)
- msleep(2000);
i40e_up(vsi);
clear_bit(__I40E_CONFIG_BUSY, &pf->state);
}
@@ -5280,6 +5320,9 @@ void i40e_down(struct i40e_vsi *vsi)
i40e_clean_tx_ring(vsi->tx_rings[i]);
i40e_clean_rx_ring(vsi->rx_rings[i]);
}
+
+ i40e_notify_client_of_netdev_close(vsi, false);
+
}
/**
@@ -5391,15 +5434,7 @@ int i40e_open(struct net_device *netdev)
TCP_FLAG_CWR) >> 16);
wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
-#ifdef CONFIG_I40E_VXLAN
- vxlan_get_rx_port(netdev);
-#endif
-#ifdef CONFIG_I40E_GENEVE
- if (pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE)
- geneve_get_rx_port(netdev);
-#endif
-
- i40e_notify_client_of_netdev_open(vsi);
+ udp_tunnel_get_rx_info(netdev);
return 0;
}
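
The vxlan_get_rx_port()/geneve_get_rx_port() pair collapses into one udp_tunnel_get_rx_info() call: the core replays every known UDP tunnel port through the driver's .ndo_udp_tunnel_add. A minimal sketch of the receiving side under the new API, consistent with the i40e_udp_tunnel_add hunk later in this patch (record_offload_port() is a hypothetical stand-in for the driver's port-table bookkeeping):

    #include <net/udp_tunnel.h>

    /* Sketch of a .ndo_udp_tunnel_add handler; only the udp_tunnel_info
     * plumbing is the point, record_offload_port() is hypothetical.
     */
    static void sketch_udp_tunnel_add(struct net_device *netdev,
    				  struct udp_tunnel_info *ti)
    {
    	switch (ti->type) {
    	case UDP_TUNNEL_TYPE_VXLAN:
    	case UDP_TUNNEL_TYPE_GENEVE:
    		record_offload_port(netdev, ti->port, ti->type);
    		break;
    	default:
    		break;		/* unsupported tunnel type */
    	}
    }
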
@@ -5509,11 +5544,7 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf)
*
* Returns 0, this is not allowed to fail
**/
-#ifdef I40E_FCOE
int i40e_close(struct net_device *netdev)
-#else
-static int i40e_close(struct net_device *netdev)
-#endif
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
@@ -5538,8 +5569,6 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
WARN_ON(in_interrupt());
- if (i40e_check_asq_alive(&pf->hw))
- i40e_vc_notify_reset(pf);
/* do the biggest reset indicated */
if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {
@@ -5691,7 +5720,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
u8 type;
/* Not DCB capable or capability disabled */
- if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+ if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
return ret;
/* Ignore if event is not for Nearest Bridge */
@@ -5771,6 +5800,8 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
i40e_service_event_schedule(pf);
} else {
i40e_pf_unquiesce_all_vsi(pf);
+ /* Notify the client for the DCB changes */
+ i40e_notify_client_of_l2_param_changes(pf->vsi[pf->lan_vsi]);
}
exit:
@@ -5995,7 +6026,6 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
}
-
}
/**
@@ -6377,7 +6407,7 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
break;
default:
dev_info(&pf->pdev->dev,
- "ARQ Error: Unknown event 0x%04x received\n",
+ "ARQ: Unknown event 0x%04x ignored\n",
opcode);
break;
}
@@ -6742,6 +6772,8 @@ static void i40e_prep_for_reset(struct i40e_pf *pf)
clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
return;
+ if (i40e_check_asq_alive(&pf->hw))
+ i40e_vc_notify_reset(pf);
dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
@@ -6862,6 +6894,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
*/
ret = i40e_aq_set_phy_int_mask(&pf->hw,
~(I40E_AQ_EVENT_LINK_UPDOWN |
+ I40E_AQ_EVENT_MEDIA_NA |
I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
if (ret)
dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
@@ -7109,7 +7142,6 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
**/
static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
{
-#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE)
struct i40e_hw *hw = &pf->hw;
i40e_status ret;
__be16 port;
@@ -7144,7 +7176,6 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
}
}
}
-#endif
}
/**
@@ -7226,7 +7257,7 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
vsi->alloc_queue_pairs = 1;
vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
I40E_REQ_DESCRIPTOR_MULTIPLE);
- vsi->num_q_vectors = 1;
+ vsi->num_q_vectors = pf->num_fdsb_msix;
break;
case I40E_VSI_VMDQ2:
@@ -7525,10 +7556,6 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
rx_ring->count = vsi->num_desc;
rx_ring->size = 0;
rx_ring->dcb_tc = 0;
- if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
- set_ring_16byte_desc_enabled(rx_ring);
- else
- clear_ring_16byte_desc_enabled(rx_ring);
rx_ring->rx_itr_setting = pf->rx_itr_default;
vsi->rx_rings[i] = rx_ring;
}
@@ -7614,9 +7641,11 @@ static int i40e_init_msix(struct i40e_pf *pf)
/* reserve one vector for sideband flow director */
if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
if (vectors_left) {
+ pf->num_fdsb_msix = 1;
v_budget++;
vectors_left--;
} else {
+ pf->num_fdsb_msix = 0;
pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
}
}
@@ -7782,10 +7811,11 @@ static int i40e_init_msix(struct i40e_pf *pf)
* i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
* @vsi: the VSI being configured
* @v_idx: index of the vector in the vsi struct
+ * @cpu: cpu to be used on affinity_mask
*
* We allocate one q_vector. If allocation fails we return -ENOMEM.
**/
-static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
+static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
{
struct i40e_q_vector *q_vector;
@@ -7796,7 +7826,8 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
q_vector->vsi = vsi;
q_vector->v_idx = v_idx;
- cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
+ cpumask_set_cpu(cpu, &q_vector->affinity_mask);
+
if (vsi->netdev)
netif_napi_add(vsi->netdev, &q_vector->napi,
i40e_napi_poll, NAPI_POLL_WEIGHT);
@@ -7820,8 +7851,7 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
- int v_idx, num_q_vectors;
- int err;
+ int err, v_idx, num_q_vectors, current_cpu;
/* if not MSIX, give the one vector only to the LAN VSI */
if (pf->flags & I40E_FLAG_MSIX_ENABLED)
@@ -7831,10 +7861,15 @@ static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
else
return -EINVAL;
+ current_cpu = cpumask_first(cpu_online_mask);
+
for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
- err = i40e_vsi_alloc_q_vector(vsi, v_idx);
+ err = i40e_vsi_alloc_q_vector(vsi, v_idx, current_cpu);
if (err)
goto err_out;
+ current_cpu = cpumask_next(current_cpu, cpu_online_mask);
+ if (unlikely(current_cpu >= nr_cpu_ids))
+ current_cpu = cpumask_first(cpu_online_mask);
}
return 0;
@@ -7865,6 +7900,7 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
#endif
I40E_FLAG_RSS_ENABLED |
I40E_FLAG_DCB_CAPABLE |
+ I40E_FLAG_DCB_ENABLED |
I40E_FLAG_SRIOV_ENABLED |
I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED |
@@ -7961,7 +7997,6 @@ static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
u8 *rss_lut;
int ret, i;
- memset(&rss_key, 0, sizeof(rss_key));
memcpy(&rss_key, seed, sizeof(rss_key));
rss_lut = kzalloc(pf->rss_table_size, GFP_KERNEL);
@@ -8084,24 +8119,45 @@ static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
{
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
+ u16 vf_id = vsi->vf_id;
u8 i;
/* Fill out hash function seed */
if (seed) {
u32 *seed_dw = (u32 *)seed;
- for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
- i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
+ if (vsi->type == I40E_VSI_MAIN) {
+ for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
+ i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i),
+ seed_dw[i]);
+ } else if (vsi->type == I40E_VSI_SRIOV) {
+ for (i = 0; i <= I40E_VFQF_HKEY1_MAX_INDEX; i++)
+ i40e_write_rx_ctl(hw,
+ I40E_VFQF_HKEY1(i, vf_id),
+ seed_dw[i]);
+ } else {
+ dev_err(&pf->pdev->dev, "Cannot set RSS seed - invalid VSI type\n");
+ }
}
if (lut) {
u32 *lut_dw = (u32 *)lut;
- if (lut_size != I40E_HLUT_ARRAY_SIZE)
- return -EINVAL;
-
- for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
- wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
+ if (vsi->type == I40E_VSI_MAIN) {
+ if (lut_size != I40E_HLUT_ARRAY_SIZE)
+ return -EINVAL;
+ for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++)
+ wr32(hw, I40E_PFQF_HLUT(i), lut_dw[i]);
+ } else if (vsi->type == I40E_VSI_SRIOV) {
+ if (lut_size != I40E_VF_HLUT_ARRAY_SIZE)
+ return -EINVAL;
+ for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
+ i40e_write_rx_ctl(hw,
+ I40E_VFQF_HLUT1(i, vf_id),
+ lut_dw[i]);
+ } else {
+ dev_err(&pf->pdev->dev, "Cannot set RSS LUT - invalid VSI type\n");
+ }
}
i40e_flush(hw);
@@ -8440,7 +8496,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
(NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
- pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG;
if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
if (I40E_DEBUG_USER & debug)
pf->hw.debug_mask = debug;
@@ -8451,14 +8506,8 @@ static int i40e_sw_init(struct i40e_pf *pf)
/* Set default capability flags */
pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
I40E_FLAG_MSI_ENABLED |
- I40E_FLAG_LINK_POLLING_ENABLED |
I40E_FLAG_MSIX_ENABLED;
- if (iommu_present(&pci_bus_type))
- pf->flags |= I40E_FLAG_RX_PS_ENABLED;
- else
- pf->flags |= I40E_FLAG_RX_1BUF_ENABLED;
-
/* Set default ITR */
pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
@@ -8621,7 +8670,9 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
/* Enable filters and mark for reset */
if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
need_reset = true;
- pf->flags |= I40E_FLAG_FD_SB_ENABLED;
+ /* enable FD_SB only if there is MSI-X vector */
+ if (pf->num_fdsb_msix > 0)
+ pf->flags |= I40E_FLAG_FD_SB_ENABLED;
} else {
/* turn off filters, mark for reset and clear SW filter list */
if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
@@ -8670,7 +8721,6 @@ static int i40e_set_features(struct net_device *netdev,
return 0;
}
-#if IS_ENABLED(CONFIG_VXLAN) || IS_ENABLED(CONFIG_GENEVE)
/**
* i40e_get_udp_port_idx - Lookup a possibly offloaded for Rx UDP port
* @pf: board private structure
@@ -8690,21 +8740,18 @@ static u8 i40e_get_udp_port_idx(struct i40e_pf *pf, __be16 port)
return i;
}
-#endif
-
-#if IS_ENABLED(CONFIG_VXLAN)
/**
- * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
+ * i40e_udp_tunnel_add - Get notifications about UDP tunnel ports that come up
* @netdev: This physical port's netdev
- * @sa_family: Socket Family that VXLAN is notifying us about
- * @port: New UDP port number that VXLAN started listening to
+ * @ti: Tunnel endpoint information
**/
-static void i40e_add_vxlan_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
+static void i40e_udp_tunnel_add(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
+ __be16 port = ti->port;
u8 next_idx;
u8 idx;
@@ -8712,7 +8759,7 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
/* Check if port already exists */
if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- netdev_info(netdev, "vxlan port %d already offloaded\n",
+ netdev_info(netdev, "port %d already offloaded\n",
ntohs(port));
return;
}
@@ -8721,131 +8768,75 @@ static void i40e_add_vxlan_port(struct net_device *netdev,
next_idx = i40e_get_udp_port_idx(pf, 0);
if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n",
- ntohs(port));
- return;
- }
-
- /* New port: add it and mark its index in the bitmap */
- pf->udp_ports[next_idx].index = port;
- pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
- pf->pending_udp_bitmap |= BIT_ULL(next_idx);
- pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
-}
-
-/**
- * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away
- * @netdev: This physical port's netdev
- * @sa_family: Socket Family that VXLAN is notifying us about
- * @port: UDP port number that VXLAN stopped listening to
- **/
-static void i40e_del_vxlan_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
-{
- struct i40e_netdev_priv *np = netdev_priv(netdev);
- struct i40e_vsi *vsi = np->vsi;
- struct i40e_pf *pf = vsi->back;
- u8 idx;
-
- idx = i40e_get_udp_port_idx(pf, port);
-
- /* Check if port already exists */
- if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- /* if port exists, set it to 0 (mark for deletion)
- * and make it pending
- */
- pf->udp_ports[idx].index = 0;
- pf->pending_udp_bitmap |= BIT_ULL(idx);
- pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
- } else {
- netdev_warn(netdev, "vxlan port %d was not found, not deleting\n",
- ntohs(port));
- }
-}
-#endif
-
-#if IS_ENABLED(CONFIG_GENEVE)
-/**
- * i40e_add_geneve_port - Get notifications about GENEVE ports that come up
- * @netdev: This physical port's netdev
- * @sa_family: Socket Family that GENEVE is notifying us about
- * @port: New UDP port number that GENEVE started listening to
- **/
-static void i40e_add_geneve_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
-{
- struct i40e_netdev_priv *np = netdev_priv(netdev);
- struct i40e_vsi *vsi = np->vsi;
- struct i40e_pf *pf = vsi->back;
- u8 next_idx;
- u8 idx;
-
- if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE))
- return;
-
- idx = i40e_get_udp_port_idx(pf, port);
-
- /* Check if port already exists */
- if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- netdev_info(netdev, "udp port %d already offloaded\n",
+ netdev_info(netdev, "maximum number of offloaded UDP ports reached, not adding port %d\n",
ntohs(port));
return;
}
- /* Now check if there is space to add the new port */
- next_idx = i40e_get_udp_port_idx(pf, 0);
-
- if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- netdev_info(netdev, "maximum number of UDP ports reached, not adding port %d\n",
- ntohs(port));
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_VXLAN;
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE))
+ return;
+ pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
+ break;
+ default:
return;
}
/* New port: add it and mark its index in the bitmap */
pf->udp_ports[next_idx].index = port;
- pf->udp_ports[next_idx].type = I40E_AQC_TUNNEL_TYPE_NGE;
pf->pending_udp_bitmap |= BIT_ULL(next_idx);
pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
-
- dev_info(&pf->pdev->dev, "adding geneve port %d\n", ntohs(port));
}
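/* Sketch, outside the driver, of the port-table bookkeeping used above:
 * a fixed-size table of offloaded ports plus a pending bitmap that a
 * later sync step would push to hardware. All names are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_PORTS 16		/* ~ I40E_MAX_PF_UDP_OFFLOAD_PORTS */

static uint16_t ports[MAX_PORTS];	/* 0 means the slot is free */
static uint64_t pending_bitmap;

static int port_idx(uint16_t port)
{
	for (int i = 0; i < MAX_PORTS; i++)
		if (ports[i] == port)
			return i;
	return MAX_PORTS;		/* not found */
}

static void tunnel_add(uint16_t port)
{
	if (port_idx(port) < MAX_PORTS)
		return;				/* already offloaded */

	int next = port_idx(0);			/* first free slot */
	if (next == MAX_PORTS)
		return;				/* table full */

	ports[next] = port;
	pending_bitmap |= 1ULL << next;		/* flush to HW later */
}

int main(void)
{
	tunnel_add(4789);	/* VXLAN's default UDP port */
	printf("pending=%#llx\n", (unsigned long long)pending_bitmap);
	return 0;
}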
/**
- * i40e_del_geneve_port - Get notifications about GENEVE ports that go away
+ * i40e_udp_tunnel_del - Get notifications about UDP tunnel ports that go away
* @netdev: This physical port's netdev
- * @sa_family: Socket Family that GENEVE is notifying us about
- * @port: UDP port number that GENEVE stopped listening to
+ * @ti: Tunnel endpoint information
**/
-static void i40e_del_geneve_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
+static void i40e_udp_tunnel_del(struct net_device *netdev,
+ struct udp_tunnel_info *ti)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
+ __be16 port = ti->port;
u8 idx;
- if (!(pf->flags & I40E_FLAG_GENEVE_OFFLOAD_CAPABLE))
- return;
-
idx = i40e_get_udp_port_idx(pf, port);
/* Check if port already exists */
- if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
- /* if port exists, set it to 0 (mark for deletion)
- * and make it pending
- */
- pf->udp_ports[idx].index = 0;
- pf->pending_udp_bitmap |= BIT_ULL(idx);
- pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
+ if (idx >= I40E_MAX_PF_UDP_OFFLOAD_PORTS)
+ goto not_found;
- dev_info(&pf->pdev->dev, "deleting geneve port %d\n",
- ntohs(port));
- } else {
- netdev_warn(netdev, "geneve port %d was not found, not deleting\n",
- ntohs(port));
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_VXLAN)
+ goto not_found;
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ if (pf->udp_ports[idx].type != I40E_AQC_TUNNEL_TYPE_NGE)
+ goto not_found;
+ break;
+ default:
+ goto not_found;
}
+
+ /* if port exists, set it to 0 (mark for deletion)
+ * and make it pending
+ */
+ pf->udp_ports[idx].index = 0;
+ pf->pending_udp_bitmap |= BIT_ULL(idx);
+ pf->flags |= I40E_FLAG_UDP_FILTER_SYNC;
+
+ return;
+not_found:
+ netdev_warn(netdev, "UDP port %d was not found, not deleting\n",
+ ntohs(port));
}
-#endif
static int i40e_get_phys_port_id(struct net_device *netdev,
struct netdev_phys_item_id *ppid)
@@ -9074,14 +9065,9 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_get_vf_config = i40e_ndo_get_vf_config,
.ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
.ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
-#if IS_ENABLED(CONFIG_VXLAN)
- .ndo_add_vxlan_port = i40e_add_vxlan_port,
- .ndo_del_vxlan_port = i40e_del_vxlan_port,
-#endif
-#if IS_ENABLED(CONFIG_GENEVE)
- .ndo_add_geneve_port = i40e_add_geneve_port,
- .ndo_del_geneve_port = i40e_del_geneve_port,
-#endif
+ .ndo_set_vf_trust = i40e_ndo_set_vf_trust,
+ .ndo_udp_tunnel_add = i40e_udp_tunnel_add,
+ .ndo_udp_tunnel_del = i40e_udp_tunnel_del,
.ndo_get_phys_port_id = i40e_get_phys_port_id,
.ndo_fdb_add = i40e_ndo_fdb_add,
.ndo_features_check = i40e_features_check,
@@ -9097,7 +9083,6 @@ static const struct net_device_ops i40e_netdev_ops = {
**/
static int i40e_config_netdev(struct i40e_vsi *vsi)
{
- u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
struct i40e_netdev_priv *np;
@@ -9114,40 +9099,44 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
np = netdev_priv(netdev);
np->vsi = vsi;
- netdev->hw_enc_features |= NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
- NETIF_F_TSO |
- NETIF_F_TSO6 |
- NETIF_F_TSO_ECN |
- NETIF_F_GSO_GRE |
- NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ netdev->hw_enc_features |= NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_HIGHDMA |
+ NETIF_F_SOFT_FEATURES |
+ NETIF_F_TSO |
+ NETIF_F_TSO_ECN |
+ NETIF_F_TSO6 |
+ NETIF_F_GSO_GRE |
+ NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_IPXIP6 |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_RXHASH |
+ NETIF_F_RXCSUM |
0;
- netdev->features = NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_SCTP_CRC |
- NETIF_F_HIGHDMA |
- NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_GRE |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER |
- NETIF_F_IPV6_CSUM |
- NETIF_F_TSO |
- NETIF_F_TSO_ECN |
- NETIF_F_TSO6 |
- NETIF_F_RXCSUM |
- NETIF_F_RXHASH |
- 0;
+ if (!(pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE))
+ netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+ netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
+
+ /* record features VLANs can make use of */
+ netdev->vlan_features |= netdev->hw_enc_features |
+ NETIF_F_TSO_MANGLEID;
if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
- netdev->features |= NETIF_F_NTUPLE;
- if (pf->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE)
- netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+ netdev->hw_features |= NETIF_F_NTUPLE;
+
+ netdev->hw_features |= netdev->hw_enc_features |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX;
- /* copy netdev features into list of user selectable features */
- netdev->hw_features |= netdev->features;
+ netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
+ netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
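/* Toy model of the layered feature masks above: the user-togglable set
 * (hw_features) is built from the encapsulation set, and the active set
 * (features) adds the always-on VLAN filter bit. Flag values are made up.
 */
#include <stdint.h>
#include <stdio.h>

#define F_SG          (1u << 0)
#define F_CSUM        (1u << 1)
#define F_TSO         (1u << 2)
#define F_VLAN_TX     (1u << 3)
#define F_VLAN_RX     (1u << 4)
#define F_VLAN_FILTER (1u << 5)	/* in features, but not user-togglable */

int main(void)
{
	uint32_t hw_enc_features = F_SG | F_CSUM | F_TSO;
	uint32_t hw_features = hw_enc_features | F_VLAN_TX | F_VLAN_RX;
	uint32_t features = hw_features | F_VLAN_FILTER;

	printf("enc=%#x togglable=%#x active=%#x\n",
	       hw_enc_features, hw_features, features);
	return 0;
}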
if (vsi->type == I40E_VSI_MAIN) {
SET_NETDEV_DEV(netdev, &pf->pdev->dev);
@@ -9157,12 +9146,10 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
* default a MAC-VLAN filter that accepts any tagged packet
* which must be replaced by a normal filter.
*/
- if (!i40e_rm_default_mac_filter(vsi, mac_addr)) {
- spin_lock_bh(&vsi->mac_filter_list_lock);
- i40e_add_filter(vsi, mac_addr,
- I40E_VLAN_ANY, false, true);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
- }
+ i40e_rm_default_mac_filter(vsi, mac_addr);
+ spin_lock_bh(&vsi->mac_filter_list_lock);
+ i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, true);
+ spin_unlock_bh(&vsi->mac_filter_list_lock);
} else {
/* relate the VSI_VMDQ name to the VSI_MAIN name */
snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
@@ -9174,18 +9161,9 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
spin_unlock_bh(&vsi->mac_filter_list_lock);
}
- spin_lock_bh(&vsi->mac_filter_list_lock);
- i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
- spin_unlock_bh(&vsi->mac_filter_list_lock);
-
ether_addr_copy(netdev->dev_addr, mac_addr);
ether_addr_copy(netdev->perm_addr, mac_addr);
- /* vlan gets same features (except vlan offload)
- * after any tweaks for specific VSI types
- */
- netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER);
+
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->priv_flags |= IFF_SUPP_NOFCS;
/* Setup netdev TC information */
@@ -9260,8 +9238,7 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
static int i40e_add_vsi(struct i40e_vsi *vsi)
{
int ret = -ENODEV;
- u8 laa_macaddr[ETH_ALEN];
- bool found_laa_mac_filter = false;
+ i40e_status aq_ret = 0;
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
struct i40e_vsi_context ctxt;
@@ -9398,7 +9375,8 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ctxt.info.valid_sections |=
cpu_to_le16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
ctxt.info.queueing_opt_flags |=
- I40E_AQ_VSI_QUE_OPT_TCP_ENA;
+ (I40E_AQ_VSI_QUE_OPT_TCP_ENA |
+ I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI);
}
ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
@@ -9448,42 +9426,29 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
vsi->seid = ctxt.seid;
vsi->id = ctxt.vsi_number;
}
+ /* Except for the FDIR VSI, set the broadcast filter on all other VSIs */
+ if (vsi->type != I40E_VSI_FDIR) {
+ aq_ret = i40e_aq_set_vsi_broadcast(hw, vsi->seid, true, NULL);
+ if (aq_ret) {
+ ret = i40e_aq_rc_to_posix(aq_ret,
+ hw->aq.asq_last_status);
+ dev_info(&pf->pdev->dev,
+ "set brdcast promisc failed, err %s, aq_err %s\n",
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ }
+ }
+ vsi->active_filters = 0;
+ clear_bit(__I40E_FILTER_OVERFLOW_PROMISC, &vsi->state);
spin_lock_bh(&vsi->mac_filter_list_lock);
/* If macvlan filters already exist, force them to get loaded */
list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
- f->changed = true;
+ f->state = I40E_FILTER_NEW;
f_count++;
-
- /* Expected to have only one MAC filter entry for LAA in list */
- if (f->is_laa && vsi->type == I40E_VSI_MAIN) {
- ether_addr_copy(laa_macaddr, f->macaddr);
- found_laa_mac_filter = true;
- }
}
spin_unlock_bh(&vsi->mac_filter_list_lock);
- if (found_laa_mac_filter) {
- struct i40e_aqc_remove_macvlan_element_data element;
-
- memset(&element, 0, sizeof(element));
- ether_addr_copy(element.mac_addr, laa_macaddr);
- element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
- ret = i40e_aq_remove_macvlan(hw, vsi->seid,
- &element, 1, NULL);
- if (ret) {
- /* some older FW has a different default */
- element.flags |=
- I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
- i40e_aq_remove_macvlan(hw, vsi->seid,
- &element, 1, NULL);
- }
-
- i40e_aq_mac_address_write(hw,
- I40E_AQC_WRITE_TYPE_LAA_WOL,
- laa_macaddr, NULL);
- }
-
if (f_count) {
vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
pf->flags |= I40E_FLAG_FILTER_SYNC;
@@ -9694,6 +9659,8 @@ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
+ if (vsi->type == I40E_VSI_MAIN)
+ i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr);
/* assign it some queues */
ret = i40e_alloc_rings(vsi);
@@ -9719,44 +9686,6 @@ err_vsi:
}
/**
- * i40e_macaddr_init - explicitly write the mac address filters.
- *
- * @vsi: pointer to the vsi.
- * @macaddr: the MAC address
- *
- * This is needed when the macaddr has been obtained by other
- * means than the default, e.g., from Open Firmware or IDPROM.
- * Returns 0 on success, negative on failure
- **/
-static int i40e_macaddr_init(struct i40e_vsi *vsi, u8 *macaddr)
-{
- int ret;
- struct i40e_aqc_add_macvlan_element_data element;
-
- ret = i40e_aq_mac_address_write(&vsi->back->hw,
- I40E_AQC_WRITE_TYPE_LAA_WOL,
- macaddr, NULL);
- if (ret) {
- dev_info(&vsi->back->pdev->dev,
- "Addr change for VSI failed: %d\n", ret);
- return -EADDRNOTAVAIL;
- }
-
- memset(&element, 0, sizeof(element));
- ether_addr_copy(element.mac_addr, macaddr);
- element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
- ret = i40e_aq_add_macvlan(&vsi->back->hw, vsi->seid, &element, 1, NULL);
- if (ret) {
- dev_info(&vsi->back->pdev->dev,
- "add filter failed err %s aq_err %s\n",
- i40e_stat_str(&vsi->back->hw, ret),
- i40e_aq_str(&vsi->back->hw,
- vsi->back->hw.aq.asq_last_status));
- }
- return ret;
-}
-
-/**
* i40e_vsi_setup - Set up a VSI by a given type
* @pf: board private structure
* @type: VSI type
@@ -10168,14 +10097,14 @@ void i40e_veb_release(struct i40e_veb *veb)
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
{
struct i40e_pf *pf = veb->pf;
- bool is_default = veb->pf->cur_promisc;
bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
int ret;
/* get a VEB from the hardware */
ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
- veb->enabled_tc, is_default,
+ veb->enabled_tc, false,
&veb->seid, enable_stats, NULL);
+
if (ret) {
dev_info(&pf->pdev->dev,
"couldn't add VEB, err %s aq_err %s\n",
@@ -10444,6 +10373,7 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
**/
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
{
+ u16 flags = 0;
int ret;
/* find out what's out there already */
@@ -10457,6 +10387,32 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
}
i40e_pf_reset_stats(pf);
+ /* Set the switch config bit for the whole device so that, when the
+ * user requests promiscuous mode, either limited or true promiscuous
+ * mode is supported. The default is limited promiscuous.
+ */
+
+ if ((pf->hw.pf_id == 0) &&
+ !(pf->flags & I40E_FLAG_TRUE_PROMISC_SUPPORT))
+ flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
+
+ if (pf->hw.pf_id == 0) {
+ u16 valid_flags;
+
+ valid_flags = I40E_AQ_SET_SWITCH_CFG_PROMISC;
+ ret = i40e_aq_set_switch_config(&pf->hw, flags, valid_flags,
+ NULL);
+ if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) {
+ dev_info(&pf->pdev->dev,
+ "couldn't set switch config bits, err %s aq_err %s\n",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+ /* not a fatal problem, just keep going */
+ }
+ }
+
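/* Sketch of the flags/valid_flags idiom used above: valid_flags selects
 * which bits the device may change, flags supplies their new values, so
 * unrelated switch-config bits survive untouched. Values are invented.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t switch_cfg = 0x00f0;	/* pretend device state */

static void set_switch_config(uint16_t flags, uint16_t valid_flags)
{
	switch_cfg = (switch_cfg & ~valid_flags) | (flags & valid_flags);
}

int main(void)
{
	set_switch_config(0x0001, 0x0001);	/* set only the promisc bit */
	printf("cfg=%#06x\n", switch_cfg);	/* other bits unchanged */
	return 0;
}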
/* first time setup */
if (pf->lan_vsi == I40E_NO_VSI || reinit) {
struct i40e_vsi *vsi = NULL;
@@ -10551,6 +10507,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED |
I40E_FLAG_DCB_CAPABLE |
+ I40E_FLAG_DCB_ENABLED |
I40E_FLAG_SRIOV_ENABLED |
I40E_FLAG_VMDQ_ENABLED);
} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
@@ -10574,7 +10531,8 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
/* Not enough queues for all TCs */
if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
(queues_left < I40E_MAX_TRAFFIC_CLASS)) {
- pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+ pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
+ I40E_FLAG_DCB_ENABLED);
dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
}
pf->num_lan_qps = max_t(int, pf->rss_size_max,
@@ -10684,11 +10642,9 @@ static void i40e_print_features(struct i40e_pf *pf)
#ifdef CONFIG_PCI_IOV
i += snprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs);
#endif
- i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d RX: %s",
+ i += snprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d",
pf->hw.func_caps.num_vsis,
- pf->vsi[pf->lan_vsi]->num_queue_pairs,
- pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF");
-
+ pf->vsi[pf->lan_vsi]->num_queue_pairs);
if (pf->flags & I40E_FLAG_RSS_ENABLED)
i += snprintf(&buf[i], REMAIN(i), " RSS");
if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
@@ -10699,12 +10655,8 @@ static void i40e_print_features(struct i40e_pf *pf)
}
if (pf->flags & I40E_FLAG_DCB_CAPABLE)
i += snprintf(&buf[i], REMAIN(i), " DCB");
-#if IS_ENABLED(CONFIG_VXLAN)
i += snprintf(&buf[i], REMAIN(i), " VxLAN");
-#endif
-#if IS_ENABLED(CONFIG_GENEVE)
i += snprintf(&buf[i], REMAIN(i), " Geneve");
-#endif
if (pf->flags & I40E_FLAG_PTP)
i += snprintf(&buf[i], REMAIN(i), " PTP");
#ifdef I40E_FCOE
@@ -10779,8 +10731,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* set up pci connections */
- err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
- IORESOURCE_MEM), i40e_driver_name);
+ err = pci_request_mem_regions(pdev, i40e_driver_name);
if (err) {
dev_info(&pdev->dev,
"pci_request_selected_regions failed %d\n", err);
@@ -10827,6 +10778,12 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->bus.func = PCI_FUNC(pdev->devfn);
pf->instance = pfs_found;
+ /* set up the locks for the AQ, do this only once in probe
+ * and destroy them only once in remove
+ */
+ mutex_init(&hw->aq.asq_mutex);
+ mutex_init(&hw->aq.arq_mutex);
+
if (debug != -1) {
pf->msg_enable = debug;
@@ -10872,12 +10829,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* set up a default setting for link flow control */
pf->hw.fc.requested_mode = I40E_FC_NONE;
- /* set up the locks for the AQ, do this only once in probe
- * and destroy them only once in remove
- */
- mutex_init(&hw->aq.asq_mutex);
- mutex_init(&hw->aq.arq_mutex);
-
err = i40e_init_adminq(hw);
if (err) {
if (err == I40E_ERR_FIRMWARE_API_VERSION)
@@ -10978,7 +10929,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = i40e_init_pf_dcb(pf);
if (err) {
dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
- pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+ pf->flags &= ~(I40E_FLAG_DCB_CAPABLE | I40E_FLAG_DCB_ENABLED);
/* Continue without DCB enabled */
}
#endif /* CONFIG_I40E_DCB */
@@ -11069,6 +11020,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
err = i40e_aq_set_phy_int_mask(&pf->hw,
~(I40E_AQ_EVENT_LINK_UPDOWN |
+ I40E_AQ_EVENT_MEDIA_NA |
I40E_AQ_EVENT_MODULE_QUAL_FAIL), NULL);
if (err)
dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
@@ -11270,15 +11222,13 @@ err_init_lan_hmc:
kfree(pf->qp_pile);
err_sw_init:
err_adminq_setup:
- (void)i40e_shutdown_adminq(hw);
err_pf_reset:
iounmap(hw->hw_addr);
err_ioremap:
kfree(pf);
err_pf_alloc:
pci_disable_pcie_error_reporting(pdev);
- pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
pci_disable_device(pdev);
@@ -11312,8 +11262,10 @@ static void i40e_remove(struct pci_dev *pdev)
/* no more scheduling of any task */
set_bit(__I40E_SUSPENDED, &pf->state);
set_bit(__I40E_DOWN, &pf->state);
- del_timer_sync(&pf->service_timer);
- cancel_work_sync(&pf->service_task);
+ if (pf->service_timer.data)
+ del_timer_sync(&pf->service_timer);
+ if (pf->service_task.func)
+ cancel_work_sync(&pf->service_task);
if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
i40e_free_vfs(pf);
@@ -11387,8 +11339,7 @@ static void i40e_remove(struct pci_dev *pdev)
iounmap(hw->hw_addr);
kfree(pf);
- pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_release_mem_regions(pdev);
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
@@ -11533,6 +11484,7 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct i40e_pf *pf = pci_get_drvdata(pdev);
struct i40e_hw *hw = &pf->hw;
+ int retval = 0;
set_bit(__I40E_SUSPENDED, &pf->state);
set_bit(__I40E_DOWN, &pf->state);
@@ -11544,10 +11496,16 @@ static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
+ i40e_stop_misc_vector(pf);
+
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+
pci_wake_from_d3(pdev, pf->wol_en);
pci_set_power_state(pdev, PCI_D3hot);
- return 0;
+ return retval;
}
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
index 5730f8091e1b..954efe3118db 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c
@@ -693,10 +693,10 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
/* early check for status command and debug msgs */
upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
- i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
i40e_nvm_update_state_str[upd_cmd],
hw->nvmupd_state,
- hw->aq.nvm_release_on_done,
+ hw->nvm_release_on_done, hw->nvm_wait_opcode,
cmd->command, cmd->config, cmd->offset, cmd->data_size);
if (upd_cmd == I40E_NVMUPD_INVALID) {
@@ -710,7 +710,18 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
* going into the state machine
*/
if (upd_cmd == I40E_NVMUPD_STATUS) {
+ if (!cmd->data_size) {
+ *perrno = -EFAULT;
+ return I40E_ERR_BUF_TOO_SHORT;
+ }
+
bytes[0] = hw->nvmupd_state;
+
+ if (cmd->data_size >= 4) {
+ bytes[1] = 0;
+ *((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
+ }
+
return 0;
}
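/* Stand-alone sketch of the status-buffer layout above: byte 0 carries
 * the update state; when the caller's buffer is at least 4 bytes, byte 1
 * is zeroed and bytes 2..3 carry the opcode being waited on.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int fill_status(uint8_t *bytes, int data_size,
		       uint8_t state, uint16_t wait_opcode)
{
	if (!data_size)
		return -1;			/* buffer too short */

	bytes[0] = state;
	if (data_size >= 4) {
		bytes[1] = 0;
		memcpy(&bytes[2], &wait_opcode, sizeof(wait_opcode));
	}
	return 0;
}

int main(void)
{
	uint8_t buf[4];

	fill_status(buf, sizeof(buf), 2, 0x0703);	/* opcode illustrative */
	printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
	return 0;
}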
@@ -729,6 +740,14 @@ i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
case I40E_NVMUPD_STATE_INIT_WAIT:
case I40E_NVMUPD_STATE_WRITE_WAIT:
+ /* if we need to stop waiting for an event, clear
+ * the wait info and return before doing anything else
+ */
+ if (cmd->offset == 0xffff) {
+ i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode);
+ return 0;
+ }
+
status = I40E_ERR_NOT_READY;
*perrno = -EBUSY;
break;
@@ -799,7 +818,8 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
if (status) {
i40e_release_nvm(hw);
} else {
- hw->aq.nvm_release_on_done = true;
+ hw->nvm_release_on_done = true;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
}
@@ -815,7 +835,8 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
if (status) {
i40e_release_nvm(hw);
} else {
- hw->aq.nvm_release_on_done = true;
+ hw->nvm_release_on_done = true;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
}
@@ -828,10 +849,12 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
- if (status)
+ if (status) {
i40e_release_nvm(hw);
- else
+ } else {
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
+ }
}
break;
@@ -849,7 +872,8 @@ static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
-EIO;
i40e_release_nvm(hw);
} else {
- hw->aq.nvm_release_on_done = true;
+ hw->nvm_release_on_done = true;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
}
@@ -940,8 +964,10 @@ retry:
switch (upd_cmd) {
case I40E_NVMUPD_WRITE_CON:
status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
- if (!status)
+ if (!status) {
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
+ }
break;
case I40E_NVMUPD_WRITE_LCB:
@@ -953,7 +979,8 @@ retry:
-EIO;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
} else {
- hw->aq.nvm_release_on_done = true;
+ hw->nvm_release_on_done = true;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
break;
@@ -967,6 +994,7 @@ retry:
-EIO;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
} else {
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
}
break;
@@ -980,7 +1008,8 @@ retry:
-EIO;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
} else {
- hw->aq.nvm_release_on_done = true;
+ hw->nvm_release_on_done = true;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
break;
@@ -1030,6 +1059,37 @@ retry:
}
/**
+ * i40e_nvmupd_check_wait_event - handle NVM update operation events
+ * @hw: pointer to the hardware structure
+ * @opcode: the event that just happened
+ **/
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
+{
+ if (opcode == hw->nvm_wait_opcode) {
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: clearing wait on opcode 0x%04x\n", opcode);
+ if (hw->nvm_release_on_done) {
+ i40e_release_nvm(hw);
+ hw->nvm_release_on_done = false;
+ }
+ hw->nvm_wait_opcode = 0;
+
+ switch (hw->nvmupd_state) {
+ case I40E_NVMUPD_STATE_INIT_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ break;
+
+ case I40E_NVMUPD_STATE_WRITE_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+ break;
+
+ default:
+ break;
+ }
+ }
+}
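/* User-space model of the handshake above: an admin-queue completion whose
 * opcode matches nvm_wait_opcode releases the NVM lock (if armed) and steps
 * the state machine out of its *_WAIT state. The opcode value is illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

enum upd_state { ST_INIT, ST_INIT_WAIT, ST_WRITING, ST_WRITE_WAIT };

struct nvm_ctx {
	enum upd_state state;
	unsigned short wait_opcode;	/* 0 == not waiting */
	bool release_on_done;
};

static void check_wait_event(struct nvm_ctx *c, unsigned short opcode)
{
	if (opcode != c->wait_opcode)
		return;

	if (c->release_on_done) {
		/* i40e_release_nvm(hw) would run here */
		c->release_on_done = false;
	}
	c->wait_opcode = 0;

	if (c->state == ST_INIT_WAIT)
		c->state = ST_INIT;
	else if (c->state == ST_WRITE_WAIT)
		c->state = ST_WRITING;
}

int main(void)
{
	struct nvm_ctx c = { ST_WRITE_WAIT, 0x0703, true };

	check_wait_event(&c, 0x0703);
	printf("state=%d waiting=%#x\n", c.state, c.wait_opcode);
	return 0;
}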
+
+/**
* i40e_nvmupd_validate_command - Validate given command
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
@@ -1189,6 +1249,12 @@ static i40e_status i40e_nvmupd_exec_aq(struct i40e_hw *hw,
*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
}
+ /* should we wait for a followup event? */
+ if (cmd->offset) {
+ hw->nvm_wait_opcode = cmd->offset;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+ }
+
return status;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index d51eee5bf79a..4660c5abc855 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -98,6 +98,8 @@ i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
bool qualified_modules, bool report_init,
struct i40e_aq_get_phy_abilities_resp *abilities,
@@ -130,9 +132,18 @@ i40e_status i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
u16 vsi_id, bool set_filter,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
- u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+ u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details,
+ bool rx_only_promisc);
i40e_status i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
+ u16 seid, bool enable,
+ u16 vid,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
u16 seid, bool enable,
struct i40e_asq_cmd_details *cmd_details);
@@ -174,6 +185,10 @@ i40e_status i40e_aq_get_switch_config(struct i40e_hw *hw,
struct i40e_aqc_get_switch_config_resp *buf,
u16 buf_size, u16 *start_seid,
struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_switch_config(struct i40e_hw *hw,
+ u16 flags,
+ u16 valid_flags,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_request_resource(struct i40e_hw *hw,
enum i40e_aq_resources_ids resource,
enum i40e_aq_resource_access_type access,
@@ -228,10 +243,6 @@ i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
- enum i40e_aq_hmc_profile profile,
- u8 pe_vf_enabled_count,
- struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
u16 seid, u16 credit, u8 max_bw,
struct i40e_asq_cmd_details *cmd_details);
@@ -308,6 +319,7 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *);
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode);
void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 565ca7c835bc..ed39cbad24bd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -158,9 +158,10 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
- struct timespec64 now, then = ns_to_timespec64(delta);
+ struct timespec64 now, then;
unsigned long flags;
+ then = ns_to_timespec64(delta);
spin_lock_irqsave(&pf->tmreg_lock, flags);
i40e_ptp_read(pf, &now);
@@ -288,9 +289,7 @@ void i40e_ptp_rx_hang(struct i40e_vsi *vsi)
rd32(hw, I40E_PRTTSYN_RXTIME_H(3));
pf->last_rx_ptp_check = jiffies;
pf->rx_hwtstamp_cleared++;
- dev_warn(&vsi->back->pdev->dev,
- "%s: clearing Rx timestamp hang\n",
- __func__);
+ WARN_ONCE(1, "Detected Rx timestamp register hang\n");
}
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 6a49b7ae511c..df7ecc9578c9 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -636,19 +636,21 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
/**
* i40e_clean_tx_irq - Reclaim resources after transmit completes
- * @tx_ring: tx ring to clean
- * @budget: how many cleans we're allowed
+ * @vsi: the VSI we care about
+ * @tx_ring: Tx ring to clean
+ * @napi_budget: Used to determine if we are in netpoll
*
* Returns true if there's any budget left (i.e. the clean is finished)
**/
-static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
+static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
+ struct i40e_ring *tx_ring, int napi_budget)
{
u16 i = tx_ring->next_to_clean;
struct i40e_tx_buffer *tx_buf;
struct i40e_tx_desc *tx_head;
struct i40e_tx_desc *tx_desc;
- unsigned int total_packets = 0;
- unsigned int total_bytes = 0;
+ unsigned int total_bytes = 0, total_packets = 0;
+ unsigned int budget = vsi->work_limit;
tx_buf = &tx_ring->tx_bi[i];
tx_desc = I40E_TX_DESC(tx_ring, i);
@@ -678,7 +680,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
total_packets += tx_buf->gso_segs;
/* free the skb */
- dev_consume_skb_any(tx_buf->skb);
+ napi_consume_skb(tx_buf->skb, napi_budget);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -738,18 +740,16 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
tx_ring->q_vector->tx.total_packets += total_packets;
if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
- unsigned int j = 0;
-
/* check to see if there are < 4 descriptors
* waiting to be written back, then kick the hardware to force
* them to be written back in case we stay in NAPI.
* In this mode on X722 we do not enable Interrupt.
*/
- j = i40e_get_tx_pending(tx_ring, false);
+ unsigned int j = i40e_get_tx_pending(tx_ring, false);
if (budget &&
((j / (WB_STRIDE + 1)) == 0) && (j != 0) &&
- !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
+ !test_bit(__I40E_DOWN, &vsi->state) &&
(I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
tx_ring->arm_wb = true;
}
@@ -767,7 +767,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
smp_mb();
if (__netif_subqueue_stopped(tx_ring->netdev,
tx_ring->queue_index) &&
- !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
+ !test_bit(__I40E_DOWN, &vsi->state)) {
netif_wake_subqueue(tx_ring->netdev,
tx_ring->queue_index);
++tx_ring->tx_stats.restart_queue;
@@ -1022,7 +1022,6 @@ err:
void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
- struct i40e_rx_buffer *rx_bi;
unsigned long bi_size;
u16 i;
@@ -1030,48 +1029,22 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
if (!rx_ring->rx_bi)
return;
- if (ring_is_ps_enabled(rx_ring)) {
- int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;
-
- rx_bi = &rx_ring->rx_bi[0];
- if (rx_bi->hdr_buf) {
- dma_free_coherent(dev,
- bufsz,
- rx_bi->hdr_buf,
- rx_bi->dma);
- for (i = 0; i < rx_ring->count; i++) {
- rx_bi = &rx_ring->rx_bi[i];
- rx_bi->dma = 0;
- rx_bi->hdr_buf = NULL;
- }
- }
- }
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
- rx_bi = &rx_ring->rx_bi[i];
- if (rx_bi->dma) {
- dma_unmap_single(dev,
- rx_bi->dma,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- rx_bi->dma = 0;
- }
+ struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
+
if (rx_bi->skb) {
dev_kfree_skb(rx_bi->skb);
rx_bi->skb = NULL;
}
- if (rx_bi->page) {
- if (rx_bi->page_dma) {
- dma_unmap_page(dev,
- rx_bi->page_dma,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
- rx_bi->page_dma = 0;
- }
- __free_page(rx_bi->page);
- rx_bi->page = NULL;
- rx_bi->page_offset = 0;
- }
+ if (!rx_bi->page)
+ continue;
+
+ dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE);
+ __free_pages(rx_bi->page, 0);
+
+ rx_bi->page = NULL;
+ rx_bi->page_offset = 0;
}
bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
@@ -1080,6 +1053,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
/* Zero out the descriptor ring */
memset(rx_ring->desc, 0, rx_ring->size);
+ rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
}
@@ -1104,37 +1078,6 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
}
/**
- * i40e_alloc_rx_headers - allocate rx header buffers
- * @rx_ring: ring to alloc buffers
- *
- * Allocate rx header buffers for the entire ring. As these are static,
- * this is only called when setting up a new ring.
- **/
-void i40e_alloc_rx_headers(struct i40e_ring *rx_ring)
-{
- struct device *dev = rx_ring->dev;
- struct i40e_rx_buffer *rx_bi;
- dma_addr_t dma;
- void *buffer;
- int buf_size;
- int i;
-
- if (rx_ring->rx_bi[0].hdr_buf)
- return;
- /* Make sure the buffers don't cross cache line boundaries. */
- buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
- buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
- &dma, GFP_KERNEL);
- if (!buffer)
- return;
- for (i = 0; i < rx_ring->count; i++) {
- rx_bi = &rx_ring->rx_bi[i];
- rx_bi->dma = dma + (i * buf_size);
- rx_bi->hdr_buf = buffer + (i * buf_size);
- }
-}
-
-/**
* i40e_setup_rx_descriptors - Allocate Rx descriptors
* @rx_ring: Rx descriptor ring (for a specific queue) to setup
*
@@ -1155,9 +1098,7 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
u64_stats_init(&rx_ring->syncp);
/* Round up to nearest 4K */
- rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
- ? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
- : rx_ring->count * sizeof(union i40e_32byte_rx_desc);
+ rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
@@ -1168,6 +1109,7 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
goto err;
}
+ rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
@@ -1186,6 +1128,10 @@ err:
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
rx_ring->next_to_use = val;
+
+ /* update next to alloc since we have filled the ring */
+ rx_ring->next_to_alloc = val;
+
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
@@ -1196,160 +1142,122 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
}
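/* Hedged sketch of the ordering the comment above describes, using C11
 * atomics in place of the kernel's wmb()/writel(): descriptor memory must
 * be globally visible before the tail doorbell moves.
 */
#include <stdatomic.h>
#include <stdint.h>

#define RING_COUNT 512

static uint64_t desc_ring[RING_COUNT];
static _Atomic uint32_t tail_reg;	/* stand-in for the MMIO tail */

static void release_rx_desc(uint32_t val)
{
	desc_ring[val % RING_COUNT] = 0x1;		/* publish descriptors */
	atomic_thread_fence(memory_order_release);	/* ~ wmb() */
	atomic_store_explicit(&tail_reg, val,		/* ~ writel(val, tail) */
			      memory_order_relaxed);
}

int main(void)
{
	release_rx_desc(64);
	return 0;
}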
/**
- * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split
- * @rx_ring: ring to place buffers on
- * @cleaned_count: number of buffers to replace
+ * i40e_alloc_mapped_page - recycle or make a new page
+ * @rx_ring: ring to use
+ * @bi: rx_buffer struct to modify
*
- * Returns true if any errors on allocation
+ * Returns true if the page was successfully allocated or
+ * reused.
**/
-bool i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
+static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *bi)
{
- u16 i = rx_ring->next_to_use;
- union i40e_rx_desc *rx_desc;
- struct i40e_rx_buffer *bi;
- const int current_node = numa_node_id();
+ struct page *page = bi->page;
+ dma_addr_t dma;
- /* do nothing if no valid netdev defined */
- if (!rx_ring->netdev || !cleaned_count)
- return false;
+ /* since we are recycling buffers we should seldom need to alloc */
+ if (likely(page)) {
+ rx_ring->rx_stats.page_reuse_count++;
+ return true;
+ }
- while (cleaned_count--) {
- rx_desc = I40E_RX_DESC(rx_ring, i);
- bi = &rx_ring->rx_bi[i];
+ /* alloc new page for storage */
+ page = dev_alloc_page();
+ if (unlikely(!page)) {
+ rx_ring->rx_stats.alloc_page_failed++;
+ return false;
+ }
- if (bi->skb) /* desc is in use */
- goto no_buffers;
+ /* map page for use */
+ dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
- /* If we've been moved to a different NUMA node, release the
- * page so we can get a new one on the current node.
+ /* if mapping failed free memory back to system since
+ * there isn't much point in holding memory we can't use
*/
- if (bi->page && page_to_nid(bi->page) != current_node) {
- dma_unmap_page(rx_ring->dev,
- bi->page_dma,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
- __free_page(bi->page);
- bi->page = NULL;
- bi->page_dma = 0;
- rx_ring->rx_stats.realloc_count++;
- } else if (bi->page) {
- rx_ring->rx_stats.page_reuse_count++;
- }
-
- if (!bi->page) {
- bi->page = alloc_page(GFP_ATOMIC);
- if (!bi->page) {
- rx_ring->rx_stats.alloc_page_failed++;
- goto no_buffers;
- }
- bi->page_dma = dma_map_page(rx_ring->dev,
- bi->page,
- 0,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(rx_ring->dev, bi->page_dma)) {
- rx_ring->rx_stats.alloc_page_failed++;
- __free_page(bi->page);
- bi->page = NULL;
- bi->page_dma = 0;
- bi->page_offset = 0;
- goto no_buffers;
- }
- bi->page_offset = 0;
- }
-
- /* Refresh the desc even if buffer_addrs didn't change
- * because each write-back erases this info.
- */
- rx_desc->read.pkt_addr =
- cpu_to_le64(bi->page_dma + bi->page_offset);
- rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
- i++;
- if (i == rx_ring->count)
- i = 0;
+ if (dma_mapping_error(rx_ring->dev, dma)) {
+ __free_pages(page, 0);
+ rx_ring->rx_stats.alloc_page_failed++;
+ return false;
}
- if (rx_ring->next_to_use != i)
- i40e_release_rx_desc(rx_ring, i);
+ bi->dma = dma;
+ bi->page = page;
+ bi->page_offset = 0;
- return false;
+ return true;
+}
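/* User-space model of the recycle-or-allocate path above: reuse a cached
 * page when present, otherwise allocate and "map" a fresh one; a failed
 * mapping frees the page rather than caching something unusable.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

struct rx_buffer {
	void *page;		/* stand-in for struct page * */
	uintptr_t dma;		/* stand-in for dma_addr_t */
	unsigned int page_offset;
};

static uintptr_t fake_dma_map(void *p)
{
	return (uintptr_t)p;	/* real code calls dma_map_page() */
}

static bool alloc_mapped_page(struct rx_buffer *bi)
{
	if (bi->page)			/* recycled buffer: common case */
		return true;

	void *page = malloc(4096);	/* ~ dev_alloc_page() */
	if (!page)
		return false;

	uintptr_t dma = fake_dma_map(page);
	if (!dma) {			/* mapping failed: give it back */
		free(page);
		return false;
	}

	bi->page = page;
	bi->dma = dma;
	bi->page_offset = 0;
	return true;
}

int main(void)
{
	struct rx_buffer bi = { 0 };

	return alloc_mapped_page(&bi) ? 0 : 1;
}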
-no_buffers:
- if (rx_ring->next_to_use != i)
- i40e_release_rx_desc(rx_ring, i);
+/**
+ * i40e_receive_skb - Send a completed packet up the stack
+ * @rx_ring: rx ring in play
+ * @skb: packet to send up
+ * @vlan_tag: vlan tag for packet
+ **/
+static void i40e_receive_skb(struct i40e_ring *rx_ring,
+ struct sk_buff *skb, u16 vlan_tag)
+{
+ struct i40e_q_vector *q_vector = rx_ring->q_vector;
- /* make sure to come back via polling to try again after
- * allocation failure
- */
- return true;
+ if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (vlan_tag & VLAN_VID_MASK))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+
+ napi_gro_receive(&q_vector->napi, skb);
}
/**
- * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
+ * i40e_alloc_rx_buffers - Replace used receive buffers
* @rx_ring: ring to place buffers on
* @cleaned_count: number of buffers to replace
*
- * Returns true if any errors on allocation
+ * Returns false if all allocations were successful, true if any fail
**/
-bool i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
+bool i40e_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
{
- u16 i = rx_ring->next_to_use;
+ u16 ntu = rx_ring->next_to_use;
union i40e_rx_desc *rx_desc;
struct i40e_rx_buffer *bi;
- struct sk_buff *skb;
/* do nothing if no valid netdev defined */
if (!rx_ring->netdev || !cleaned_count)
return false;
- while (cleaned_count--) {
- rx_desc = I40E_RX_DESC(rx_ring, i);
- bi = &rx_ring->rx_bi[i];
- skb = bi->skb;
-
- if (!skb) {
- skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
- rx_ring->rx_buf_len,
- GFP_ATOMIC |
- __GFP_NOWARN);
- if (!skb) {
- rx_ring->rx_stats.alloc_buff_failed++;
- goto no_buffers;
- }
- /* initialize queue mapping */
- skb_record_rx_queue(skb, rx_ring->queue_index);
- bi->skb = skb;
- }
+ rx_desc = I40E_RX_DESC(rx_ring, ntu);
+ bi = &rx_ring->rx_bi[ntu];
- if (!bi->dma) {
- bi->dma = dma_map_single(rx_ring->dev,
- skb->data,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(rx_ring->dev, bi->dma)) {
- rx_ring->rx_stats.alloc_buff_failed++;
- bi->dma = 0;
- dev_kfree_skb(bi->skb);
- bi->skb = NULL;
- goto no_buffers;
- }
- }
+ do {
+ if (!i40e_alloc_mapped_page(rx_ring, bi))
+ goto no_buffers;
- rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+ /* Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info.
+ */
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
rx_desc->read.hdr_addr = 0;
- i++;
- if (i == rx_ring->count)
- i = 0;
- }
- if (rx_ring->next_to_use != i)
- i40e_release_rx_desc(rx_ring, i);
+ rx_desc++;
+ bi++;
+ ntu++;
+ if (unlikely(ntu == rx_ring->count)) {
+ rx_desc = I40E_RX_DESC(rx_ring, 0);
+ bi = rx_ring->rx_bi;
+ ntu = 0;
+ }
+
+ /* clear the status bits for the next_to_use descriptor */
+ rx_desc->wb.qword1.status_error_len = 0;
+
+ cleaned_count--;
+ } while (cleaned_count);
+
+ if (rx_ring->next_to_use != ntu)
+ i40e_release_rx_desc(rx_ring, ntu);
return false;
no_buffers:
- if (rx_ring->next_to_use != i)
- i40e_release_rx_desc(rx_ring, i);
+ if (rx_ring->next_to_use != ntu)
+ i40e_release_rx_desc(rx_ring, ntu);
/* make sure to come back via polling to try again after
* allocation failure
@@ -1358,41 +1266,35 @@ no_buffers:
}
/**
- * i40e_receive_skb - Send a completed packet up the stack
- * @rx_ring: rx ring in play
- * @skb: packet to send up
- * @vlan_tag: vlan tag for packet
- **/
-static void i40e_receive_skb(struct i40e_ring *rx_ring,
- struct sk_buff *skb, u16 vlan_tag)
-{
- struct i40e_q_vector *q_vector = rx_ring->q_vector;
-
- if (vlan_tag & VLAN_VID_MASK)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
-
- napi_gro_receive(&q_vector->napi, skb);
-}
-
-/**
* i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
* @vsi: the VSI we care about
* @skb: skb currently being received and modified
- * @rx_status: status value of last descriptor in packet
- * @rx_error: error value of last descriptor in packet
- * @rx_ptype: ptype value of last descriptor in packet
+ * @rx_desc: the receive descriptor
+ *
+ * skb->protocol must be set before this function is called
**/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
struct sk_buff *skb,
- u32 rx_status,
- u32 rx_error,
- u16 rx_ptype)
+ union i40e_rx_desc *rx_desc)
{
- struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
- bool ipv4, ipv6, ipv4_tunnel, ipv6_tunnel;
+ struct i40e_rx_ptype_decoded decoded;
+ u32 rx_error, rx_status;
+ bool ipv4, ipv6;
+ u8 ptype;
+ u64 qword;
+
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
+ rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
+ I40E_RXD_QW1_ERROR_SHIFT;
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
+ decoded = decode_rx_desc_ptype(ptype);
skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
+
/* Rx csum enabled and ip headers found? */
if (!(vsi->netdev->features & NETIF_F_RXCSUM))
return;
@@ -1432,20 +1334,23 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
return;
- /* The hardware supported by this driver does not validate outer
- * checksums for tunneled VXLAN or GENEVE frames. I don't agree
- * with it but the specification states that you "MAY validate", it
- * doesn't make it a hard requirement so if we have validated the
- * inner checksum report CHECKSUM_UNNECESSARY.
+ /* If there is an outer header present that might contain a checksum
+ * we need to bump the checksum level by 1 to reflect the fact that
+ * we are indicating we validated the inner checksum.
*/
-
- ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
- (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
- ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
- (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
-
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->csum_level = ipv4_tunnel || ipv6_tunnel;
+ if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
+ skb->csum_level = 1;
+
+ /* Only report checksum unnecessary for TCP, UDP, or SCTP */
+ switch (decoded.inner_prot) {
+ case I40E_RX_PTYPE_INNER_PROT_TCP:
+ case I40E_RX_PTYPE_INNER_PROT_UDP:
+ case I40E_RX_PTYPE_INNER_PROT_SCTP:
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ /* fall through */
+ default:
+ break;
+ }
return;
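/* Toy illustration of the csum_level convention above: level 0 means the
 * outermost checksum was the one validated; level 1 means one layer of
 * encapsulation sits above the validated inner checksum.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_skb {
	int ip_summed;		/* 1 ~ CHECKSUM_UNNECESSARY */
	int csum_level;
};

static void mark_rx_checksum(struct fake_skb *skb, bool tunneled)
{
	skb->csum_level = tunneled ? 1 : 0;
	skb->ip_summed = 1;
}

int main(void)
{
	struct fake_skb skb = { 0, 0 };

	mark_rx_checksum(&skb, true);	/* e.g. TCP inside VXLAN */
	printf("summed=%d level=%d\n", skb.ip_summed, skb.csum_level);
	return 0;
}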
@@ -1459,7 +1364,7 @@ checksum_fail:
*
* Returns a hash type to be used by skb_set_hash
**/
-static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype)
+static inline int i40e_ptype_to_htype(u8 ptype)
{
struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
@@ -1487,11 +1392,11 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
u8 rx_ptype)
{
u32 hash;
- const __le64 rss_mask =
+ const __le64 rss_mask =
cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
- if (ring->netdev->features & NETIF_F_RXHASH)
+ if (!(ring->netdev->features & NETIF_F_RXHASH))
return;
if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
@@ -1501,346 +1406,436 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
}
/**
- * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
- * @rx_ring: rx ring to clean
- * @budget: how many cleans we're allowed
+ * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
+ * @rx_ptype: the packet type decoded by hardware
*
- * Returns true if there's any budget left (e.g. the clean is finished)
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, VLAN, protocol, and
+ * other fields within the skb.
**/
-static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget)
+static inline
+void i40e_process_skb_fields(struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc, struct sk_buff *skb,
+ u8 rx_ptype)
{
- unsigned int total_rx_bytes = 0, total_rx_packets = 0;
- u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
- u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
- struct i40e_vsi *vsi = rx_ring->vsi;
- u16 i = rx_ring->next_to_clean;
- union i40e_rx_desc *rx_desc;
- u32 rx_error, rx_status;
- bool failure = false;
- u8 rx_ptype;
- u64 qword;
- u32 copysize;
+ u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ u32 rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
+ u32 rsyn = (rx_status & I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
+ I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT;
- if (budget <= 0)
- return 0;
+ if (unlikely(rsyn)) {
+ i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, rsyn);
+ rx_ring->last_rx_timestamp = jiffies;
+ }
- do {
- struct i40e_rx_buffer *rx_bi;
- struct sk_buff *skb;
- u16 vlan_tag;
- /* return some buffers to hardware, one at a time is too slow */
- if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
- failure = failure ||
- i40e_alloc_rx_buffers_ps(rx_ring,
- cleaned_count);
- cleaned_count = 0;
- }
+ i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
- i = rx_ring->next_to_clean;
- rx_desc = I40E_RX_DESC(rx_ring, i);
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
- I40E_RXD_QW1_STATUS_SHIFT;
+ /* modifies the skb - consumes the enet header */
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
- if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
- break;
+ i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
- /* This memory barrier is needed to keep us from reading
- * any other fields out of the rx_desc until we know the
- * DD bit is set.
- */
- dma_rmb();
- /* sync header buffer for reading */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_ring->rx_bi[0].dma,
- i * rx_ring->rx_hdr_len,
- rx_ring->rx_hdr_len,
- DMA_FROM_DEVICE);
- if (i40e_rx_is_programming_status(qword)) {
- i40e_clean_programming_status(rx_ring, rx_desc);
- I40E_RX_INCREMENT(rx_ring, i);
- continue;
- }
- rx_bi = &rx_ring->rx_bi[i];
- skb = rx_bi->skb;
- if (likely(!skb)) {
- skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
- rx_ring->rx_hdr_len,
- GFP_ATOMIC |
- __GFP_NOWARN);
- if (!skb) {
- rx_ring->rx_stats.alloc_buff_failed++;
- failure = true;
- break;
- }
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+}
- /* initialize queue mapping */
- skb_record_rx_queue(skb, rx_ring->queue_index);
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_ring->rx_bi[0].dma,
- i * rx_ring->rx_hdr_len,
- rx_ring->rx_hdr_len,
- DMA_FROM_DEVICE);
- }
- rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
- rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
- rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
- I40E_RXD_QW1_LENGTH_SPH_SHIFT;
-
- rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
- I40E_RXD_QW1_ERROR_SHIFT;
- rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
- rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+/**
+ * i40e_pull_tail - i40e specific version of skb_pull_tail
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being adjusted
+ *
+ * This function is an i40e specific version of __pskb_pull_tail. The
+ * main difference between this version and the original function is that
+ * this function can make several assumptions about the state of things
+ * that allow for significant optimizations versus the standard function.
+ * As a result we can do things like drop a frag and maintain an accurate
+ * truesize for the skb.
+ */
+static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb)
+{
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ unsigned char *va;
+ unsigned int pull_len;
- rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
- I40E_RXD_QW1_PTYPE_SHIFT;
- /* sync half-page for reading */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_bi->page_dma,
- rx_bi->page_offset,
- PAGE_SIZE / 2,
- DMA_FROM_DEVICE);
- prefetch(page_address(rx_bi->page) + rx_bi->page_offset);
- rx_bi->skb = NULL;
- cleaned_count++;
- copysize = 0;
- if (rx_hbo || rx_sph) {
- int len;
+ /* it is valid to use page_address instead of kmap since we are
+ * working with pages allocated out of the lowmem pool per
+ * alloc_page(GFP_ATOMIC)
+ */
+ va = skb_frag_address(frag);
- if (rx_hbo)
- len = I40E_RX_HDR_SIZE;
- else
- len = rx_header_len;
- memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
- } else if (skb->len == 0) {
- int len;
- unsigned char *va = page_address(rx_bi->page) +
- rx_bi->page_offset;
-
- len = min(rx_packet_len, rx_ring->rx_hdr_len);
- memcpy(__skb_put(skb, len), va, len);
- copysize = len;
- rx_packet_len -= len;
- }
- /* Get the rest of the data if this was a header split */
- if (rx_packet_len) {
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- rx_bi->page,
- rx_bi->page_offset + copysize,
- rx_packet_len, I40E_RXBUFFER_2048);
-
- /* If the page count is more than 2, then both halves
- * of the page are used and we need to free it. Do it
- * here instead of in the alloc code. Otherwise one
- * of the half-pages might be released between now and
- * then, and we wouldn't know which one to use.
- * Don't call get_page and free_page since those are
- * both expensive atomic operations that just change
- * the refcount in opposite directions. Just give the
- * page to the stack; he can have our refcount.
- */
- if (page_count(rx_bi->page) > 2) {
- dma_unmap_page(rx_ring->dev,
- rx_bi->page_dma,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
- rx_bi->page = NULL;
- rx_bi->page_dma = 0;
- rx_ring->rx_stats.realloc_count++;
- } else {
- get_page(rx_bi->page);
- /* switch to the other half-page here; the
- * allocation code programs the right addr
- * into HW. If we haven't used this half-page,
- * the address won't be changed, and HW can
- * just use it next time through.
- */
- rx_bi->page_offset ^= PAGE_SIZE / 2;
- }
+ /* the pulled header needs to contain the greater of ETH_HLEN or, when
+ * skb->len is under 60, the bytes skb_pad() will need to build a
+ * minimum-length frame.
+ */
+ pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);
- }
- I40E_RX_INCREMENT(rx_ring, i);
+ /* align pull length to size of long to optimize memcpy performance */
+ skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
- if (unlikely(
- !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
- struct i40e_rx_buffer *next_buffer;
+ /* update all of the pointers */
+ skb_frag_size_sub(frag, pull_len);
+ frag->page_offset += pull_len;
+ skb->data_len -= pull_len;
+ skb->tail += pull_len;
+}
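/* Stand-alone sketch of the pull-tail idea: copy only the packet headers
 * from the page fragment into the linear area, then shrink the fragment
 * by the same amount so the overall length stays consistent.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char frag[2048];	/* page-backed fragment */
	unsigned char linear[256];	/* skb linear area */
	unsigned int frag_len = 1514;
	unsigned int pull_len = 64;	/* ~ eth_get_headlen() result */

	memset(frag, 0xee, sizeof(frag));
	memcpy(linear, frag, pull_len);	/* headers -> linear area */

	unsigned int frag_off = pull_len;	/* fragment starts later now */
	frag_len -= pull_len;

	printf("linear=%u frag_off=%u frag_len=%u\n",
	       pull_len, frag_off, frag_len);
	return 0;
}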
- next_buffer = &rx_ring->rx_bi[i];
- next_buffer->skb = skb;
- rx_ring->rx_stats.non_eop_descs++;
- continue;
- }
+/**
+ * i40e_cleanup_headers - Correct empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being fixed
+ *
+ * Also address the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
+ *
+ * In addition if skb is not at least 60 bytes we need to pad it so that
+ * it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
+ **/
+static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
+{
+ /* place header in linear portion of buffer */
+ if (skb_is_nonlinear(skb))
+ i40e_pull_tail(rx_ring, skb);
- /* ERR_MASK will only have valid bits if EOP set */
- if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
- dev_kfree_skb_any(skb);
- continue;
- }
+ /* if eth_skb_pad returns an error the skb was freed */
+ if (eth_skb_pad(skb))
+ return true;
- i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
+ return false;
+}
- if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
- i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
- I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
- I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
- rx_ring->last_rx_timestamp = jiffies;
- }
+/**
+ * i40e_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ **/
+static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *old_buff)
+{
+ struct i40e_rx_buffer *new_buff;
+ u16 nta = rx_ring->next_to_alloc;
- /* probably a little skewed due to removing CRC */
- total_rx_bytes += skb->len;
- total_rx_packets++;
+ new_buff = &rx_ring->rx_bi[nta];
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ /* update, and store next to alloc */
+ nta++;
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
- i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
+ /* transfer page from old buffer to new buffer */
+ *new_buff = *old_buff;
+}
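
The two-step wrap above is the driver's usual modulo-free ring advance; as a stand-alone sketch (illustrative helper, not in the driver):

/* equivalent ring advance without a divide */
static u16 ring_next(u16 idx, u16 count)
{
	return (idx + 1 < count) ? idx + 1 : 0;
}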
- vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
- ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
- : 0;
-#ifdef I40E_FCOE
- if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
- dev_kfree_skb_any(skb);
- continue;
- }
+/**
+ * i40e_page_is_reserved - check if reuse is possible
+ * @page: page struct to check
+ */
+static inline bool i40e_page_is_reserved(struct page *page)
+{
+ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+}
+
+/**
+ * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: buffer containing page to add
+ * @rx_desc: descriptor containing length of buffer written by hardware
+ * @skb: sk_buff to place the data into
+ *
+ * This function will add the data contained in rx_buffer->page to the skb.
+ * This is done either through a direct copy if the data in the buffer is
+ * less than the skb header size, otherwise it will just attach the page as
+ * a frag to the skb.
+ *
+ * The function will then update the page offset if necessary and return
+ * true if the buffer can be reused by the adapter.
+ **/
+static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *rx_buffer,
+ union i40e_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct page *page = rx_buffer->page;
+ u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ unsigned int size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = I40E_RXBUFFER_2048;
+#else
+ unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+ unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
#endif
- i40e_receive_skb(rx_ring, skb, vlan_tag);
- rx_desc->wb.qword1.status_error_len = 0;
+ /* will the data fit in the skb we allocated? if so, just
+ * copy it as it is pretty small anyway
+ */
+ if ((size <= I40E_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
+ unsigned char *va = page_address(page) + rx_buffer->page_offset;
- } while (likely(total_rx_packets < budget));
+ memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->stats.packets += total_rx_packets;
- rx_ring->stats.bytes += total_rx_bytes;
- u64_stats_update_end(&rx_ring->syncp);
- rx_ring->q_vector->rx.total_packets += total_rx_packets;
- rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+ /* page is not reserved, we can reuse buffer as-is */
+ if (likely(!i40e_page_is_reserved(page)))
+ return true;
- return failure ? budget : total_rx_packets;
+ /* this page cannot be reused so discard it */
+ __free_pages(page, 0);
+ return false;
+ }
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ rx_buffer->page_offset, size, truesize);
+
+ /* avoid re-using remote pages */
+ if (unlikely(i40e_page_is_reserved(page)))
+ return false;
+
+#if (PAGE_SIZE < 8192)
+ /* if we are only owner of page we can reuse it */
+ if (unlikely(page_count(page) != 1))
+ return false;
+
+ /* flip page offset to other buffer */
+ rx_buffer->page_offset ^= truesize;
+#else
+ /* move offset up to the next cache line */
+ rx_buffer->page_offset += truesize;
+
+ if (rx_buffer->page_offset > last_offset)
+ return false;
+#endif
+
+ /* Even if we own the page, we are not allowed to use atomic_set().
+ * This would break get_page_unless_zero() users.
+ */
+ get_page(rx_buffer->page);
+
+ return true;
+}
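
On 4K-page systems the branch above reduces to a two-slot rotation over one page; a minimal sketch of the bookkeeping (stand-alone, driver structures stubbed out with assumed names):

#include <stdbool.h>

#define BUF_SIZE 2048			/* I40E_RXBUFFER_2048 */

struct half_page_buf {
	unsigned int page_offset;	/* 0 or 2048 */
	int refs;			/* stands in for page_count() */
};

/* mirrors the PAGE_SIZE < 8192 path: the frag just attached borrows our
 * existing page reference, so reuse is safe only if that was the sole one
 */
static bool maybe_reuse(struct half_page_buf *b)
{
	if (b->refs != 1)
		return false;		/* a previous skb still owns a half */

	b->page_offset ^= BUF_SIZE;	/* flip 0 <-> 2048 for next frame */
	b->refs++;			/* get_page(): one ref for the stack,
					 * one for the ring */
	return true;
}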
+
+/**
+ * i40e_fetch_rx_buffer - Allocate skb and populate it
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_desc: descriptor containing info written by hardware
+ *
+ * This function allocates an skb on the fly, and populates it with the page
+ * data from the current receive descriptor, taking care to set up the skb
+ * correctly, as well as handling calling the page recycle function if
+ * necessary.
+ */
+static inline
+struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc)
+{
+ struct i40e_rx_buffer *rx_buffer;
+ struct sk_buff *skb;
+ struct page *page;
+
+ rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
+ page = rx_buffer->page;
+ prefetchw(page);
+
+ skb = rx_buffer->skb;
+
+ if (likely(!skb)) {
+ void *page_addr = page_address(page) + rx_buffer->page_offset;
+
+ /* prefetch first cache line of first page */
+ prefetch(page_addr);
+#if L1_CACHE_BYTES < 128
+ prefetch(page_addr + L1_CACHE_BYTES);
+#endif
+
+ /* allocate a skb to store the frags */
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
+ I40E_RX_HDR_SIZE,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!skb)) {
+ rx_ring->rx_stats.alloc_buff_failed++;
+ return NULL;
+ }
+
+ /* we will be copying the packet headers into skb->data
+ * when we pull them out of the page, so it is in our
+ * interest to prefetch it now to avoid a possible cache miss
+ */
+ prefetchw(skb->data);
+ } else {
+ rx_buffer->skb = NULL;
+ }
+
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ I40E_RXBUFFER_2048,
+ DMA_FROM_DEVICE);
+
+ /* pull page into skb */
+ if (i40e_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+ /* hand second half of page back to the ring */
+ i40e_reuse_rx_page(rx_ring, rx_buffer);
+ rx_ring->rx_stats.page_reuse_count++;
+ } else {
+ /* we are not reusing the buffer so unmap it */
+ dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ }
+
+ /* clear contents of buffer_info */
+ rx_buffer->page = NULL;
+
+ return skb;
}
/**
- * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer
- * @rx_ring: rx ring to clean
- * @budget: how many cleans we're allowed
+ * i40e_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ * @skb: Current socket buffer containing buffer in progress
*
- * Returns number of packets cleaned
+ * This function updates next to clean. If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it will place the
+ * sk_buff in the next buffer to be chained and return true indicating
+ * that this is in fact a non-EOP buffer.
**/
-static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
+static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ /* fetch, update, and store next to clean */
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+
+ prefetch(I40E_RX_DESC(rx_ring, ntc));
+
+#define staterrlen rx_desc->wb.qword1.status_error_len
+ if (unlikely(i40e_rx_is_programming_status(le64_to_cpu(staterrlen)))) {
+ i40e_clean_programming_status(rx_ring, rx_desc);
+ rx_ring->rx_bi[ntc].skb = skb;
+ return true;
+ }
+ /* if we are the last buffer then there is nothing else to do */
+#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
+ if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
+ return false;
+
+ /* place skb in next buffer to be received */
+ rx_ring->rx_bi[ntc].skb = skb;
+ rx_ring->rx_stats.non_eop_descs++;
+
+ return true;
+}
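
For scale, with the 2K buffers used above, a jumbo frame spans several descriptors and only the last carries EOF (frame size assumed):

/* e.g. a 9000-byte frame at 2048-byte Rx buffers -> 5 descriptors:
 *   descs 0-3: 2048 bytes each, EOF clear -> returns true; the skb is
 *              parked in rx_bi[ntc].skb and picked up next iteration
 *   desc 4:    808 bytes, EOF set -> returns false; the completed skb
 *              moves on to i40e_cleanup_headers()
 */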
+
+/**
+ * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @budget: Total limit on number of packets to process
+ *
+ * This function provides a "bounce buffer" approach to Rx interrupt
+ * processing. The advantage to this is that on systems that have
+ * expensive overhead for IOMMU access this provides a means of avoiding
+ * it by maintaining the mapping of the page to the system.
+ *
+ * Returns amount of work completed
+ **/
+static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
- struct i40e_vsi *vsi = rx_ring->vsi;
- union i40e_rx_desc *rx_desc;
- u32 rx_error, rx_status;
- u16 rx_packet_len;
bool failure = false;
- u8 rx_ptype;
- u64 qword;
- u16 i;
- do {
- struct i40e_rx_buffer *rx_bi;
+ while (likely(total_rx_packets < budget)) {
+ union i40e_rx_desc *rx_desc;
struct sk_buff *skb;
+ u32 rx_status;
u16 vlan_tag;
+ u8 rx_ptype;
+ u64 qword;
+
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
failure = failure ||
- i40e_alloc_rx_buffers_1buf(rx_ring,
- cleaned_count);
+ i40e_alloc_rx_buffers(rx_ring, cleaned_count);
cleaned_count = 0;
}
- i = rx_ring->next_to_clean;
- rx_desc = I40E_RX_DESC(rx_ring, i);
+ rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
+
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT;
rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
- I40E_RXD_QW1_STATUS_SHIFT;
+ I40E_RXD_QW1_STATUS_SHIFT;
if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
break;
+ /* status_error_len will always be zero for unused descriptors
+ * because it's cleared in cleanup and overlaps with hdr_addr,
+ * which is always zero because packet split isn't used; if the
+ * hardware wrote DD then it will be non-zero
+ */
+ if (!rx_desc->wb.qword1.status_error_len)
+ break;
+
/* This memory barrier is needed to keep us from reading
* any other fields out of the rx_desc until we know the
* DD bit is set.
*/
dma_rmb();
- if (i40e_rx_is_programming_status(qword)) {
- i40e_clean_programming_status(rx_ring, rx_desc);
- I40E_RX_INCREMENT(rx_ring, i);
- continue;
- }
- rx_bi = &rx_ring->rx_bi[i];
- skb = rx_bi->skb;
- prefetch(skb->data);
-
- rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
-
- rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
- I40E_RXD_QW1_ERROR_SHIFT;
- rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+ skb = i40e_fetch_rx_buffer(rx_ring, rx_desc);
+ if (!skb)
+ break;
- rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
- I40E_RXD_QW1_PTYPE_SHIFT;
- rx_bi->skb = NULL;
cleaned_count++;
- /* Get the header and possibly the whole packet
- * If this is an skb from previous receive dma will be 0
- */
- skb_put(skb, rx_packet_len);
- dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- rx_bi->dma = 0;
-
- I40E_RX_INCREMENT(rx_ring, i);
-
- if (unlikely(
- !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
- rx_ring->rx_stats.non_eop_descs++;
+ if (i40e_is_non_eop(rx_ring, rx_desc, skb))
continue;
- }
- /* ERR_MASK will only have valid bits if EOP set */
- if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+ /* ERR_MASK will only have valid bits if EOP set, and
+ * what we are doing here is actually checking
+ * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
+ * the error field
+ */
+ if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
dev_kfree_skb_any(skb);
continue;
}
- i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
- if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
- i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
- I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
- I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT);
- rx_ring->last_rx_timestamp = jiffies;
- }
+ if (i40e_cleanup_headers(rx_ring, skb))
+ continue;
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
- total_rx_packets++;
-
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
- i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
+ /* populate checksum, VLAN, and protocol */
+ i40e_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
- vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
- ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
- : 0;
#ifdef I40E_FCOE
- if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
+ if (unlikely(
+ i40e_rx_is_fcoe(rx_ptype) &&
+ !i40e_fcoe_handle_offload(rx_ring, rx_desc, skb))) {
dev_kfree_skb_any(skb);
continue;
}
#endif
+
+ vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
+ le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
+
i40e_receive_skb(rx_ring, skb, vlan_tag);
- rx_desc->wb.qword1.status_error_len = 0;
- } while (likely(total_rx_packets < budget));
+ /* update budget accounting */
+ total_rx_packets++;
+ }
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.packets += total_rx_packets;
@@ -1849,6 +1844,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
rx_ring->q_vector->rx.total_packets += total_rx_packets;
rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+ /* guarantee a trip back through this routine if there was a failure */
return failure ? budget : total_rx_packets;
}
@@ -1975,9 +1971,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
i40e_for_each_ring(ring, q_vector->tx) {
- clean_complete = clean_complete &&
- i40e_clean_tx_irq(ring, vsi->work_limit);
- arm_wb = arm_wb || ring->arm_wb;
+ if (!i40e_clean_tx_irq(vsi, ring, budget)) {
+ clean_complete = false;
+ continue;
+ }
+ arm_wb |= ring->arm_wb;
ring->arm_wb = false;
}
@@ -1991,16 +1989,12 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
i40e_for_each_ring(ring, q_vector->rx) {
- int cleaned;
-
- if (ring_is_ps_enabled(ring))
- cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
- else
- cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
+ int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);
work_done += cleaned;
- /* if we didn't clean as many as budgeted, we must be done */
- clean_complete = clean_complete && (budget_per_ring > cleaned);
+ /* if we clean as many as budgeted, we must not be done */
+ if (cleaned >= budget_per_ring)
+ clean_complete = false;
}
/* If work not completed, return budget and polling will return */
@@ -2247,15 +2241,13 @@ out:
/**
* i40e_tso - set up the tso context descriptor
- * @tx_ring: ptr to the ring to send
* @skb: ptr to the skb we're sending
* @hdr_len: ptr to the size of the packet header
* @cd_type_cmd_tso_mss: Quad Word 1
*
* Returns 0 if no TSO can happen, 1 if tso is going, or error
**/
-static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
- u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
+static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
{
u64 cd_cmd, cd_tso_len, cd_mss;
union {
@@ -2292,16 +2284,22 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
ip.v6->payload_len = 0;
}
- if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE |
+ if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
+ SKB_GSO_GRE_CSUM |
+ SKB_GSO_IPXIP4 |
+ SKB_GSO_IPXIP6 |
+ SKB_GSO_UDP_TUNNEL |
SKB_GSO_UDP_TUNNEL_CSUM)) {
- if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
+ if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
+ (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
+ l4.udp->len = 0;
+
/* determine offset of outer transport header */
l4_offset = l4.hdr - skb->data;
/* remove payload length from outer checksum */
- paylen = (__force u16)l4.udp->check;
- paylen += ntohs(1) * (u16)~(skb->len - l4_offset);
- l4.udp->check = ~csum_fold((__force __wsum)paylen);
+ paylen = skb->len - l4_offset;
+ csum_replace_by_diff(&l4.udp->check, htonl(paylen));
}
/* reset pointers to inner headers */
@@ -2321,9 +2319,8 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
l4_offset = l4.hdr - skb->data;
/* remove payload length from inner checksum */
- paylen = (__force u16)l4.tcp->check;
- paylen += ntohs(1) * (u16)~(skb->len - l4_offset);
- l4.tcp->check = ~csum_fold((__force __wsum)paylen);
+ paylen = skb->len - l4_offset;
+ csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
/* compute length of segmentation header */
*hdr_len = (l4.tcp->doff * 4) + l4_offset;
@@ -2405,7 +2402,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
unsigned char *hdr;
} l4;
unsigned char *exthdr;
- u32 offset, cmd = 0, tunnel = 0;
+ u32 offset, cmd = 0;
__be16 frag_off;
u8 l4_proto = 0;
@@ -2419,6 +2416,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
if (skb->encapsulation) {
+ u32 tunnel = 0;
/* define outer network header type */
if (*tx_flags & I40E_TX_FLAGS_IPV4) {
tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
@@ -2436,13 +2434,6 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
&l4_proto, &frag_off);
}
- /* compute outer L3 header size */
- tunnel |= ((l4.hdr - ip.hdr) / 4) <<
- I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
-
- /* switch IP header pointer from outer to inner header */
- ip.hdr = skb_inner_network_header(skb);
-
/* define outer transport */
switch (l4_proto) {
case IPPROTO_UDP:
@@ -2453,6 +2444,11 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
*tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
break;
+ case IPPROTO_IPIP:
+ case IPPROTO_IPV6:
+ *tx_flags |= I40E_TX_FLAGS_UDP_TUNNEL;
+ l4.hdr = skb_inner_network_header(skb);
+ break;
default:
if (*tx_flags & I40E_TX_FLAGS_TSO)
return -1;
@@ -2461,12 +2457,20 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
return 0;
}
+ /* compute outer L3 header size */
+ tunnel |= ((l4.hdr - ip.hdr) / 4) <<
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
+
+ /* switch IP header pointer from outer to inner header */
+ ip.hdr = skb_inner_network_header(skb);
+
/* compute tunnel header size */
tunnel |= ((ip.hdr - l4.hdr) / 2) <<
I40E_TXD_CTX_QW0_NATLEN_SHIFT;
/* indicate if we need to offload outer UDP header */
if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
+ !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
@@ -2716,6 +2720,8 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_bi = first;
for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
+
if (dma_mapping_error(tx_ring->dev, dma))
goto dma_error;
@@ -2723,12 +2729,14 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
dma_unmap_len_set(tx_bi, len, size);
dma_unmap_addr_set(tx_bi, dma, dma);
+ /* grow the first chunk so the next chunk starts 4K-aligned */
+ max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
tx_desc->buffer_addr = cpu_to_le64(dma);
while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, td_offset,
- I40E_MAX_DATA_PER_TXD, td_tag);
+ max_data, td_tag);
tx_desc++;
i++;
@@ -2739,9 +2747,10 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
i = 0;
}
- dma += I40E_MAX_DATA_PER_TXD;
- size -= I40E_MAX_DATA_PER_TXD;
+ dma += max_data;
+ size -= max_data;
+ max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
tx_desc->buffer_addr = cpu_to_le64(dma);
}
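
The -dma & (I40E_MAX_READ_REQ_SIZE - 1) adjustment is easiest to see with a concrete (assumed) address:

/* dma = 0x10000800, i.e. 2K past a 4K boundary:
 *   max_data = 12288 + (-dma & 0xfff) = 12288 + 2048 = 14336
 *   (14336 <= I40E_MAX_DATA_PER_TXD = 16383, so still one descriptor)
 *   dma += 14336 -> 0x10004000, 4K-aligned
 * every subsequent 12K chunk then starts on a read-request boundary
 */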
@@ -2891,7 +2900,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
if (i40e_chk_linearize(skb, count)) {
if (__skb_linearize(skb))
goto out_drop;
- count = TXD_USE_COUNT(skb->len);
+ count = i40e_txd_use_count(skb->len);
tx_ring->tx_stats.tx_linearize++;
}
@@ -2922,7 +2931,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
else if (protocol == htons(ETH_P_IPV6))
tx_flags |= I40E_TX_FLAGS_IPV6;
- tso = i40e_tso(tx_ring, skb, &hdr_len, &cd_type_cmd_tso_mss);
+ tso = i40e_tso(skb, &hdr_len, &cd_type_cmd_tso_mss);
if (tso < 0)
goto out_drop;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index a9bd70537d65..b78c810d1835 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -102,8 +102,8 @@ enum i40e_dyn_idx_t {
(((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
-/* Supported Rx Buffer Sizes */
-#define I40E_RXBUFFER_512 512 /* Used for packet split */
+/* Supported Rx Buffer Sizes (a multiple of 128) */
+#define I40E_RXBUFFER_256 256
#define I40E_RXBUFFER_2048 2048
#define I40E_RXBUFFER_3072 3072 /* For FCoE MTU of 2158 */
#define I40E_RXBUFFER_4096 4096
@@ -114,9 +114,28 @@ enum i40e_dyn_idx_t {
* reserve 2 more, and skb_shared_info adds an additional 384 bytes;
* this adds up to 512 bytes of extra data, meaning the smallest
* allocation we could have is 1K.
- * i.e. RXBUFFER_512 --> size-1024 slab
+ * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
+ * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
*/
-#define I40E_RX_HDR_SIZE I40E_RXBUFFER_512
+#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
+#define i40e_rx_desc i40e_32byte_rx_desc
+
+/**
+ * i40e_test_staterr - tests bits in Rx descriptor status and error fields
+ * @rx_desc: pointer to receive descriptor (in le64 format)
+ * @stat_err_bits: value to mask
+ *
+ * This function does some fast chicanery in order to return the
+ * value of the mask which is really only used for boolean tests.
+ * The status_error_len doesn't need to be shifted because it begins
+ * at offset zero.
+ */
+static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
+ const u64 stat_err_bits)
+{
+ return !!(rx_desc->wb.qword1.status_error_len &
+ cpu_to_le64(stat_err_bits));
+}
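
Callers pass pre-shifted masks, so the descriptor itself is never byte-swapped; the constant is converted once instead. For example, the EOF test used by the Rx cleanup path above:

	if (i40e_test_staterr(rx_desc, BIT(I40E_RX_DESC_STATUS_EOF_SHIFT))) {
		/* this descriptor completes the frame */
	}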
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */
@@ -142,14 +161,41 @@ enum i40e_dyn_idx_t {
prefetch((n)); \
} while (0)
-#define i40e_rx_desc i40e_32byte_rx_desc
-
#define I40E_MAX_BUFFER_TXD 8
#define I40E_MIN_TX_LEN 17
-#define I40E_MAX_DATA_PER_TXD 8192
+
+/* The size limit for a transmit buffer in a descriptor is (16K - 1).
+ * In order to align with the read requests we will align the value to
+ * the nearest 4K which represents our maximum read request size.
+ */
+#define I40E_MAX_READ_REQ_SIZE 4096
+#define I40E_MAX_DATA_PER_TXD (16 * 1024 - 1)
+#define I40E_MAX_DATA_PER_TXD_ALIGNED \
+ (I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
+
+/* This ugly bit of math is equivalent to DIV_ROUND_UP(size, X) where X is
+ * the value I40E_MAX_DATA_PER_TXD_ALIGNED. It is needed due to the fact
+ * that 12K is not a power of 2 and division is expensive. It is used to
+ * approximate the number of descriptors used per linear buffer. Note
+ * that this will overestimate in some cases as it doesn't account for the
+ * fact that we will add up to 4K - 1 in aligning the 12K buffer; however,
+ * the error should not impact things much as large buffers usually mean
+ * we will use fewer descriptors than there are frags in an skb.
+ */
+static inline unsigned int i40e_txd_use_count(unsigned int size)
+{
+ const unsigned int max = I40E_MAX_DATA_PER_TXD_ALIGNED;
+ const unsigned int reciprocal = ((1ull << 32) - 1 + (max / 2)) / max;
+ unsigned int adjust = ~(u32)0;
+
+ /* if we rounded up on the reciprocal pull down the adjustment */
+ if ((max * reciprocal) > adjust)
+ adjust = ~(u32)(reciprocal - 1);
+
+ return (u32)((((u64)size * reciprocal) + adjust) >> 32);
+}
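
A quick user-space check (stand-alone sketch, not driver code) confirms the multiply-and-shift matches true round-up division across the sizes a linear buffer can reach:

#include <assert.h>
#include <stdint.h>

#define MAX_ALIGNED 12288	/* I40E_MAX_DATA_PER_TXD_ALIGNED */

static unsigned int txd_use_count(unsigned int size)
{
	const unsigned int max = MAX_ALIGNED;
	const unsigned int reciprocal =
		((1ull << 32) - 1 + (max / 2)) / max;	/* 349525 */
	unsigned int adjust = ~(uint32_t)0;

	/* if we rounded up on the reciprocal pull down the adjustment */
	if ((max * reciprocal) > adjust)
		adjust = ~(uint32_t)(reciprocal - 1);

	return (uint32_t)((((uint64_t)size * reciprocal) + adjust) >> 32);
}

int main(void)
{
	unsigned int size;

	/* GSO caps a linear send at 64K, well inside the exact range */
	for (size = 1; size <= 65536; size++)
		assert(txd_use_count(size) ==
		       (size + MAX_ALIGNED - 1) / MAX_ALIGNED);
	return 0;
}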
/* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define I40E_MIN_DESC_PENDING 4
@@ -184,10 +230,8 @@ struct i40e_tx_buffer {
struct i40e_rx_buffer {
struct sk_buff *skb;
- void *hdr_buf;
dma_addr_t dma;
struct page *page;
- dma_addr_t page_dma;
unsigned int page_offset;
};
@@ -216,22 +260,18 @@ struct i40e_rx_queue_stats {
enum i40e_ring_state_t {
__I40E_TX_FDIR_INIT_DONE,
__I40E_TX_XPS_INIT_DONE,
- __I40E_RX_PS_ENABLED,
- __I40E_RX_16BYTE_DESC_ENABLED,
};
-#define ring_is_ps_enabled(ring) \
- test_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define set_ring_ps_enabled(ring) \
- set_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define clear_ring_ps_enabled(ring) \
- clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define ring_is_16byte_desc_enabled(ring) \
- test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
-#define set_ring_16byte_desc_enabled(ring) \
- set_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
-#define clear_ring_16byte_desc_enabled(ring) \
- clear_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+/* some useful defines for virtchannel interface, which
+ * is the only remaining user of header split
+ */
+#define I40E_RX_DTYPE_NO_SPLIT 0
+#define I40E_RX_DTYPE_HEADER_SPLIT 1
+#define I40E_RX_DTYPE_SPLIT_ALWAYS 2
+#define I40E_RX_SPLIT_L2 0x1
+#define I40E_RX_SPLIT_IP 0x2
+#define I40E_RX_SPLIT_TCP_UDP 0x4
+#define I40E_RX_SPLIT_SCTP 0x8
/* struct that defines a descriptor ring, associated with a VSI */
struct i40e_ring {
@@ -258,16 +298,7 @@ struct i40e_ring {
u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */
- u16 rx_hdr_len;
u16 rx_buf_len;
- u8 dtype;
-#define I40E_RX_DTYPE_NO_SPLIT 0
-#define I40E_RX_DTYPE_HEADER_SPLIT 1
-#define I40E_RX_DTYPE_SPLIT_ALWAYS 2
-#define I40E_RX_SPLIT_L2 0x1
-#define I40E_RX_SPLIT_IP 0x2
-#define I40E_RX_SPLIT_TCP_UDP 0x4
-#define I40E_RX_SPLIT_SCTP 0x8
/* used in interrupt processing */
u16 next_to_use;
@@ -301,6 +332,7 @@ struct i40e_ring {
struct i40e_q_vector *q_vector; /* Backreference to associated vector */
struct rcu_head rcu; /* to avoid race on free */
+ u16 next_to_alloc;
} ____cacheline_internodealigned_in_smp;
enum i40e_latency_range {
@@ -324,9 +356,7 @@ struct i40e_ring_container {
#define i40e_for_each_ring(pos, head) \
for (pos = (head).ring; pos != NULL; pos = pos->next)
-bool i40e_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count);
-bool i40e_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count);
-void i40e_alloc_rx_headers(struct i40e_ring *rxr);
+bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
@@ -377,7 +407,7 @@ static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
int count = 0, size = skb_headlen(skb);
for (;;) {
- count += TXD_USE_COUNT(size);
+ count += i40e_txd_use_count(size);
if (!nr_frags--)
break;
@@ -423,4 +453,14 @@ static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
/* we can support up to 8 data buffers for a single send */
return count != I40E_MAX_BUFFER_TXD;
}
+
+/**
+ * i40e_rx_is_fcoe - returns true if the Rx packet type is FCoE
+ * @ptype: the packet type field from Rx descriptor write-back
+ **/
+static inline bool i40e_rx_is_fcoe(u16 ptype)
+{
+ return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
+ (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
+}
#endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 3335f9d13374..bd5f13bef83c 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -36,7 +36,7 @@
#include "i40e_devids.h"
/* I40E_MASK is a macro used on 32 bit registers */
-#define I40E_MASK(mask, shift) (mask << shift)
+#define I40E_MASK(mask, shift) ((u32)(mask) << (shift))
#define I40E_MAX_VSI_QP 16
#define I40E_MAX_VF_VSI 3
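
The added cast matters once a mask reaches bit 31: with a plain int literal the shift runs into the sign bit, which is undefined behaviour in C.

	u32 bad  = 0x3 << 30;		/* int shifted into the sign bit: UB */
	u32 good = I40E_MASK(0x3, 30);	/* ((u32)0x3 << 30): well defined */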
@@ -275,6 +275,11 @@ struct i40e_hw_capabilities {
#define I40E_FLEX10_STATUS_DCC_ERROR 0x1
#define I40E_FLEX10_STATUS_VC_MODE 0x2
+ bool sec_rev_disabled;
+ bool update_disabled;
+#define I40E_NVM_MGMT_SEC_REV_DISABLED 0x1
+#define I40E_NVM_MGMT_UPDATE_DISABLED 0x2
+
bool mgmt_cem;
bool ieee_1588;
bool iwarp;
@@ -549,6 +554,8 @@ struct i40e_hw {
enum i40e_nvmupd_state nvmupd_state;
struct i40e_aq_desc nvm_wb_desc;
struct i40e_virt_mem nvm_buff;
+ bool nvm_release_on_done;
+ u16 nvm_wait_opcode;
/* HMC info */
struct i40e_hmc_info hmc; /* HMC info struct */
@@ -1533,4 +1540,37 @@ struct i40e_lldp_variables {
/* RSS Hash Table Size */
#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000
+
+/* INPUT SET MASK for RSS, flow director, and flexible payload */
+#define I40E_L3_SRC_SHIFT 47
+#define I40E_L3_SRC_MASK (0x3ULL << I40E_L3_SRC_SHIFT)
+#define I40E_L3_V6_SRC_SHIFT 43
+#define I40E_L3_V6_SRC_MASK (0xFFULL << I40E_L3_V6_SRC_SHIFT)
+#define I40E_L3_DST_SHIFT 35
+#define I40E_L3_DST_MASK (0x3ULL << I40E_L3_DST_SHIFT)
+#define I40E_L3_V6_DST_SHIFT 35
+#define I40E_L3_V6_DST_MASK (0xFFULL << I40E_L3_V6_DST_SHIFT)
+#define I40E_L4_SRC_SHIFT 34
+#define I40E_L4_SRC_MASK (0x1ULL << I40E_L4_SRC_SHIFT)
+#define I40E_L4_DST_SHIFT 33
+#define I40E_L4_DST_MASK (0x1ULL << I40E_L4_DST_SHIFT)
+#define I40E_VERIFY_TAG_SHIFT 31
+#define I40E_VERIFY_TAG_MASK (0x3ULL << I40E_VERIFY_TAG_SHIFT)
+
+#define I40E_FLEX_50_SHIFT 13
+#define I40E_FLEX_50_MASK (0x1ULL << I40E_FLEX_50_SHIFT)
+#define I40E_FLEX_51_SHIFT 12
+#define I40E_FLEX_51_MASK (0x1ULL << I40E_FLEX_51_SHIFT)
+#define I40E_FLEX_52_SHIFT 11
+#define I40E_FLEX_52_MASK (0x1ULL << I40E_FLEX_52_SHIFT)
+#define I40E_FLEX_53_SHIFT 10
+#define I40E_FLEX_53_MASK (0x1ULL << I40E_FLEX_53_SHIFT)
+#define I40E_FLEX_54_SHIFT 9
+#define I40E_FLEX_54_MASK (0x1ULL << I40E_FLEX_54_SHIFT)
+#define I40E_FLEX_55_SHIFT 8
+#define I40E_FLEX_55_MASK (0x1ULL << I40E_FLEX_55_SHIFT)
+#define I40E_FLEX_56_SHIFT 7
+#define I40E_FLEX_56_MASK (0x1ULL << I40E_FLEX_56_SHIFT)
+#define I40E_FLEX_57_SHIFT 6
+#define I40E_FLEX_57_MASK (0x1ULL << I40E_FLEX_57_SHIFT)
#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index ab866cf3dc18..c92a3bdee229 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -80,10 +80,15 @@ enum i40e_virtchnl_ops {
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
I40E_VIRTCHNL_OP_GET_STATS = 15,
I40E_VIRTCHNL_OP_FCOE = 16,
- I40E_VIRTCHNL_OP_EVENT = 17,
+ I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
I40E_VIRTCHNL_OP_IWARP = 20,
I40E_VIRTCHNL_OP_CONFIG_IWARP_IRQ_MAP = 21,
I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP = 22,
+ I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
+ I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
+ I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
+ I40E_VIRTCHNL_OP_SET_RSS_HENA = 26,
+
};
/* Virtual channel message descriptor. This overlays the admin queue
@@ -157,6 +162,7 @@ struct i40e_virtchnl_vsi_resource {
#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0x00080000
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
@@ -165,8 +171,8 @@ struct i40e_virtchnl_vf_resource {
u16 max_mtu;
u32 vf_offload_flags;
- u32 max_fcoe_contexts;
- u32 max_fcoe_filters;
+ u32 rss_key_size;
+ u32 rss_lut_size;
struct i40e_virtchnl_vsi_resource vsi_res[1];
};
@@ -325,6 +331,39 @@ struct i40e_virtchnl_promisc_info {
* PF replies with struct i40e_eth_stats in an external buffer.
*/
+/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY
+ * I40E_VIRTCHNL_OP_CONFIG_RSS_LUT
+ * VF sends these messages to configure RSS. Only supported if both PF
+ * and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
+ * configuration negotiation. If this is the case, then the RSS fields in
+ * the VF resource struct are valid.
+ * Both the key and LUT are initialized to 0 by the PF, meaning that
+ * RSS is effectively disabled until set up by the VF.
+ */
+struct i40e_virtchnl_rss_key {
+ u16 vsi_id;
+ u16 key_len;
+ u8 key[1]; /* RSS hash key, packed bytes */
+};
+
+struct i40e_virtchnl_rss_lut {
+ u16 vsi_id;
+ u16 lut_entries;
+ u8 lut[1]; /* RSS lookup table */
+};
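
Both structs end in a one-element array standing in for a variable-length payload, so a sender sizes the message as the struct plus payload length minus one; the length validation added to i40e_vc_validate_vf_msg() below mirrors this (key_len assumed already set):

	/* e.g. an I40E_HKEY_ARRAY_SIZE-byte key: key[1] already accounts
	 * for one payload byte, hence the "- 1"
	 */
	u16 msglen = sizeof(struct i40e_virtchnl_rss_key) + key_len - 1;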
+
+/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS
+ * I40E_VIRTCHNL_OP_SET_RSS_HENA
+ * VF sends these messages to get and set the hash filter enable bits for RSS.
+ * By default, the PF sets these to all possible traffic types that the
+ * hardware supports. The VF can query this value if it wants to change the
+ * traffic types that are hashed by the hardware.
+ * Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h
+ */
+struct i40e_virtchnl_rss_hena {
+ u64 hena;
+};
+
/* I40E_VIRTCHNL_OP_EVENT
* PF sends this message to inform the VF driver of events that may affect it.
* No direct response is expected from the VF, though it may generate other
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 816c6bbf7093..6fcbf764f32b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -48,7 +48,7 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
int i;
for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
- int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+ int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
/* Not all vfs are enabled so skip the ones that are not */
if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states) &&
!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
@@ -63,7 +63,7 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
}
/**
- * i40e_vc_notify_link_state
+ * i40e_vc_notify_vf_link_state
* @vf: pointer to the VF structure
*
* send a link status message to a single VF
@@ -74,7 +74,7 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf)
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
struct i40e_link_status *ls = &pf->hw.phy.link_info;
- int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+ int abs_vf_id = vf->vf_id + (int)hw->func_caps.vf_base_id;
pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
@@ -141,7 +141,7 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states))
return;
- abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id;
+ abs_vf_id = vf->vf_id + (int)vf->pf->hw.func_caps.vf_base_id;
pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
@@ -590,7 +590,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
}
rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
- /* set splitalways mode 10b */
+ /* set split mode 10b */
rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
}
@@ -665,7 +665,7 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
goto error_alloc_vsi_res;
}
if (type == I40E_VSI_SRIOV) {
- u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ u64 hena = i40e_pf_get_default_rss_hena(pf);
vf->lan_vsi_idx = vsi->idx;
vf->lan_vsi_id = vsi->id;
@@ -688,13 +688,11 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
"Could not add MAC filter %pM for VF %d\n",
vf->default_lan_addr.addr, vf->vf_id);
}
- f = i40e_add_filter(vsi, brdcast,
- vf->port_vlan_id ? vf->port_vlan_id : -1,
- true, false);
- if (!f)
- dev_info(&pf->pdev->dev,
- "Could not allocate VF broadcast filter\n");
spin_unlock_bh(&vsi->mac_filter_list_lock);
+ i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id),
+ (u32)hena);
+ i40e_write_rx_ctl(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id),
+ (u32)(hena >> 32));
}
/* program mac filter */
@@ -860,7 +858,11 @@ static int i40e_alloc_vf_res(struct i40e_vf *vf)
if (ret)
goto error_alloc;
total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
- set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+
+ if (vf->trusted)
+ set_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
+ else
+ clear_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
/* store the total qps number for the runtime
* VF req validation
@@ -917,9 +919,9 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
{
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
+ u32 reg, reg_idx, bit_idx;
bool rsd = false;
int i;
- u32 reg;
if (test_and_set_bit(__I40E_VF_DISABLE, &pf->state))
return;
@@ -937,6 +939,11 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
i40e_flush(hw);
}
+ /* clear the VFLR bit in GLGEN_VFLRSTAT */
+ reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
+ bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
+ wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
+ i40e_flush(hw);
if (i40e_quiesce_vf_pci(vf))
dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n",
@@ -988,6 +995,7 @@ complete_reset:
}
/* tell the VF the reset is done */
wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
+
i40e_flush(hw);
clear_bit(__I40E_VF_DISABLE, &pf->state);
}
@@ -1227,8 +1235,8 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
/* single place to detect unsuccessful return values */
if (v_retval) {
vf->num_invalid_msgs++;
- dev_err(&pf->pdev->dev, "VF %d failed opcode %d, error: %d\n",
- vf->vf_id, v_opcode, v_retval);
+ dev_info(&pf->pdev->dev, "VF %d failed opcode %d, retval: %d\n",
+ vf->vf_id, v_opcode, v_retval);
if (vf->num_invalid_msgs >
I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
dev_err(&pf->pdev->dev,
@@ -1246,9 +1254,9 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
msg, msglen, NULL);
if (aq_ret) {
- dev_err(&pf->pdev->dev,
- "Unable to send the message to VF %d aq_err %d\n",
- vf->vf_id, pf->hw.aq.asq_last_status);
+ dev_info(&pf->pdev->dev,
+ "Unable to send the message to VF %d aq_err %d\n",
+ vf->vf_id, pf->hw.aq.asq_last_status);
return -EIO;
}
@@ -1306,8 +1314,8 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
struct i40e_pf *pf = vf->pf;
i40e_status aq_ret = 0;
struct i40e_vsi *vsi;
- int i = 0, len = 0;
int num_vsis = 1;
+ int len = 0;
int ret;
if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
@@ -1342,12 +1350,16 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
set_bit(I40E_VF_STAT_IWARPENA, &vf->vf_states);
}
- if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) {
- if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)
- vfres->vf_offload_flags |=
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ;
+ if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF;
} else {
- vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG;
+ if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) &&
+ (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ))
+ vfres->vf_offload_flags |=
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ;
+ else
+ vfres->vf_offload_flags |=
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG;
}
if (pf->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) {
@@ -1356,8 +1368,16 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2;
}
- if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING)
+ if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING) {
+ if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ dev_err(&pf->pdev->dev,
+ "VF %d requested polling mode: this feature is supported only when the device is running in single function per port (SFP) mode\n",
+ vf->vf_id);
+ ret = I40E_ERR_PARAM;
+ goto err;
+ }
vfres->vf_offload_flags |= I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING;
+ }
if (pf->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) {
if (vf->driver_caps & I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
@@ -1368,16 +1388,18 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
vfres->num_vsis = num_vsis;
vfres->num_queue_pairs = vf->num_queue_pairs;
vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
+ vfres->rss_key_size = I40E_HKEY_ARRAY_SIZE;
+ vfres->rss_lut_size = I40E_VF_HLUT_ARRAY_SIZE;
+
if (vf->lan_vsi_idx) {
- vfres->vsi_res[i].vsi_id = vf->lan_vsi_id;
- vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
- vfres->vsi_res[i].num_queue_pairs = vsi->alloc_queue_pairs;
+ vfres->vsi_res[0].vsi_id = vf->lan_vsi_id;
+ vfres->vsi_res[0].vsi_type = I40E_VSI_SRIOV;
+ vfres->vsi_res[0].num_queue_pairs = vsi->alloc_queue_pairs;
/* VFs only use TC 0 */
- vfres->vsi_res[i].qset_handle
+ vfres->vsi_res[0].qset_handle
= le16_to_cpu(vsi->info.qs_handle[0]);
- ether_addr_copy(vfres->vsi_res[i].default_mac_addr,
+ ether_addr_copy(vfres->vsi_res[0].default_mac_addr,
vf->default_lan_addr.addr);
- i++;
}
set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
@@ -1407,6 +1429,25 @@ static void i40e_vc_reset_vf_msg(struct i40e_vf *vf)
}
/**
+ * i40e_getnum_vf_vsi_vlan_filters
+ * @vsi: pointer to the vsi
+ *
+ * called to get the number of VLANs offloaded on this VF
+ **/
+static inline int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi)
+{
+ struct i40e_mac_filter *f;
+ int num_vlans = 0;
+
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID)
+ num_vlans++;
+ }
+
+ return num_vlans;
+}
+
+/**
* i40e_vc_config_promiscuous_mode_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -1422,22 +1463,128 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
(struct i40e_virtchnl_promisc_info *)msg;
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
- struct i40e_vsi *vsi;
+ struct i40e_mac_filter *f;
+ i40e_status aq_ret = 0;
bool allmulti = false;
- i40e_status aq_ret;
+ struct i40e_vsi *vsi;
+ bool alluni = false;
+ int aq_err = 0;
vsi = i40e_find_vsi_from_id(pf, info->vsi_id);
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
- !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
- !i40e_vc_isvalid_vsi_id(vf, info->vsi_id) ||
- (vsi->type != I40E_VSI_FCOE)) {
+ !i40e_vc_isvalid_vsi_id(vf, info->vsi_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
+ if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+ dev_err(&pf->pdev->dev,
+ "Unprivileged VF %d is attempting to configure promiscuous mode\n",
+ vf->vf_id);
+ /* Lie to the VF on purpose. */
+ aq_ret = 0;
+ goto error_param;
+ }
+ /* Multicast promiscuous handling */
if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
allmulti = true;
- aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
- allmulti, NULL);
+
+ if (vf->port_vlan_id) {
+ aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw, vsi->seid,
+ allmulti,
+ vf->port_vlan_id,
+ NULL);
+ } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ if (f->vlan < 0 || f->vlan > I40E_MAX_VLANID)
+ continue;
+ aq_ret = i40e_aq_set_vsi_mc_promisc_on_vlan(hw,
+ vsi->seid,
+ allmulti,
+ f->vlan,
+ NULL);
+ aq_err = pf->hw.aq.asq_last_status;
+ if (aq_ret) {
+ dev_err(&pf->pdev->dev,
+ "Could not add VLAN %d to multicast promiscuous domain err %s aq_err %s\n",
+ f->vlan,
+ i40e_stat_str(&pf->hw, aq_ret),
+ i40e_aq_str(&pf->hw, aq_err));
+ break;
+ }
+ }
+ } else {
+ aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
+ allmulti, NULL);
+ aq_err = pf->hw.aq.asq_last_status;
+ if (aq_ret) {
+ dev_err(&pf->pdev->dev,
+ "VF %d failed to set multicast promiscuous mode err %s aq_err %s\n",
+ vf->vf_id,
+ i40e_stat_str(&pf->hw, aq_ret),
+ i40e_aq_str(&pf->hw, aq_err));
+ goto error_param_int;
+ }
+ }
+
+ if (!aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "VF %d successfully set multicast promiscuous mode\n",
+ vf->vf_id);
+ if (allmulti)
+ set_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states);
+ else
+ clear_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states);
+ }
+
+ if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC)
+ alluni = true;
+ if (vf->port_vlan_id) {
+ aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, vsi->seid,
+ alluni,
+ vf->port_vlan_id,
+ NULL);
+ } else if (i40e_getnum_vf_vsi_vlan_filters(vsi)) {
+ list_for_each_entry(f, &vsi->mac_filter_list, list) {
+ aq_ret = 0;
+ if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) {
+ aq_ret =
+ i40e_aq_set_vsi_uc_promisc_on_vlan(hw,
+ vsi->seid,
+ alluni,
+ f->vlan,
+ NULL);
+ aq_err = pf->hw.aq.asq_last_status;
+ }
+ if (aq_ret)
+ dev_err(&pf->pdev->dev,
+ "Could not add VLAN %d to Unicast promiscuous domain err %s aq_err %s\n",
+ f->vlan,
+ i40e_stat_str(&pf->hw, aq_ret),
+ i40e_aq_str(&pf->hw, aq_err));
+ }
+ } else {
+ aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid,
+ alluni, NULL,
+ true);
+ aq_err = pf->hw.aq.asq_last_status;
+ if (aq_ret)
+ dev_err(&pf->pdev->dev,
+ "VF %d failed to set unicast promiscuous mode %8.8x err %s aq_err %s\n",
+ vf->vf_id, info->flags,
+ i40e_stat_str(&pf->hw, aq_ret),
+ i40e_aq_str(&pf->hw, aq_err));
+ }
+
+error_param_int:
+ if (!aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "VF %d successfully set unicast promiscuous mode\n",
+ vf->vf_id);
+ if (alluni)
+ set_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states);
+ else
+ clear_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states);
+ }
error_param:
/* send the response to the VF */
@@ -1688,6 +1835,10 @@ error_param:
(u8 *)&stats, sizeof(stats));
}
+/* If the VF is not trusted, restrict the number of MAC/VLAN filters it can program */
+#define I40E_VC_MAX_MAC_ADDR_PER_VF 8
+#define I40E_VC_MAX_VLAN_PER_VF 8
+
/**
* i40e_check_vf_permission
* @vf: pointer to the VF info
@@ -1708,15 +1859,22 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf, u8 *macaddr)
dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr);
ret = I40E_ERR_INVALID_MAC_ADDR;
} else if (vf->pf_set_mac && !is_multicast_ether_addr(macaddr) &&
+ !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) &&
!ether_addr_equal(macaddr, vf->default_lan_addr.addr)) {
/* If the host VMM administrator has set the VF MAC address
* administratively via the ndo_set_vf_mac command then deny
* permission to the VF to add or delete unicast MAC addresses.
+ * Unless the VF is privileged and then it can do whatever.
* The VF may request to set the MAC address filter already
* assigned to it so do not return an error in that case.
*/
dev_err(&pf->pdev->dev,
- "VF attempting to override administratively set MAC address\nPlease reload the VF driver to resume normal operation\n");
+ "VF attempting to override administratively set MAC address, reload the VF driver to resume normal operation\n");
+ ret = -EPERM;
+ } else if ((vf->num_mac >= I40E_VC_MAX_MAC_ADDR_PER_VF) &&
+ !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+ dev_err(&pf->pdev->dev,
+ "VF is not trusted, switch the VF to trusted to add more functionality\n");
ret = -EPERM;
}
return ret;
@@ -1741,7 +1899,6 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
int i;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
- !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
ret = I40E_ERR_PARAM;
goto error_param;
@@ -1780,6 +1937,8 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
ret = I40E_ERR_PARAM;
spin_unlock_bh(&vsi->mac_filter_list_lock);
goto error_param;
+ } else {
+ vf->num_mac++;
}
}
spin_unlock_bh(&vsi->mac_filter_list_lock);
@@ -1815,7 +1974,6 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
int i;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
- !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
ret = I40E_ERR_PARAM;
goto error_param;
@@ -1839,6 +1997,8 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
ret = I40E_ERR_INVALID_MAC_ADDR;
spin_unlock_bh(&vsi->mac_filter_list_lock);
goto error_param;
+ } else {
+ vf->num_mac--;
}
spin_unlock_bh(&vsi->mac_filter_list_lock);
@@ -1873,8 +2033,13 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
i40e_status aq_ret = 0;
int i;
+ if ((vf->num_vlan >= I40E_VC_MAX_VLAN_PER_VF) &&
+ !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+ dev_err(&pf->pdev->dev,
+ "VF is not trusted, switch the VF to trusted to add more VLAN addresses\n");
+ goto error_param;
+ }
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
- !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
@@ -1898,6 +2063,19 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
for (i = 0; i < vfl->num_elements; i++) {
/* add new VLAN filter */
int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
+ if (!ret)
+ vf->num_vlan++;
+
+ if (test_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states))
+ i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
+ true,
+ vfl->vlan_id[i],
+ NULL);
+ if (test_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states))
+ i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
+ true,
+ vfl->vlan_id[i],
+ NULL);
if (ret)
dev_err(&pf->pdev->dev,
@@ -1929,7 +2107,6 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
int i;
if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
- !test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) ||
!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
@@ -1950,6 +2127,19 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
for (i = 0; i < vfl->num_elements; i++) {
int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
+ if (!ret)
+ vf->num_vlan--;
+
+ if (test_bit(I40E_VF_STAT_UC_PROMISC, &vf->vf_states))
+ i40e_aq_set_vsi_uc_promisc_on_vlan(&pf->hw, vsi->seid,
+ false,
+ vfl->vlan_id[i],
+ NULL);
+ if (test_bit(I40E_VF_STAT_MC_PROMISC, &vf->vf_states))
+ i40e_aq_set_vsi_mc_promisc_on_vlan(&pf->hw, vsi->seid,
+ false,
+ vfl->vlan_id[i],
+ NULL);
if (ret)
dev_err(&pf->pdev->dev,
@@ -2029,6 +2219,135 @@ error_param:
}
/**
+ * i40e_vc_config_rss_key
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * Configure the VF's RSS key
+ **/
+static int i40e_vc_config_rss_key(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_rss_key *vrk =
+ (struct i40e_virtchnl_rss_key *)msg;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ u16 vsi_id = vrk->vsi_id;
+ i40e_status aq_ret = 0;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
+ (vrk->key_len != I40E_HKEY_ARRAY_SIZE)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ aq_ret = i40e_config_rss(vsi, vrk->key, NULL, 0);
+err:
+ /* send the response to the VF */
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_config_rss_lut
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * Configure the VF's RSS LUT
+ **/
+static int i40e_vc_config_rss_lut(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_rss_lut *vrl =
+ (struct i40e_virtchnl_rss_lut *)msg;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ u16 vsi_id = vrl->vsi_id;
+ i40e_status aq_ret = 0;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
+ !i40e_vc_isvalid_vsi_id(vf, vsi_id) ||
+ (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ aq_ret = i40e_config_rss(vsi, NULL, vrl->lut, I40E_VF_HLUT_ARRAY_SIZE);
+ /* send the response to the VF */
+err:
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_get_rss_hena
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * Return the RSS HENA bits allowed by the hardware
+ **/
+static int i40e_vc_get_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_rss_hena *vrh = NULL;
+ struct i40e_pf *pf = vf->pf;
+ i40e_status aq_ret = 0;
+ int len = 0;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+ len = sizeof(struct i40e_virtchnl_rss_hena);
+
+ vrh = kzalloc(len, GFP_KERNEL);
+ if (!vrh) {
+ aq_ret = I40E_ERR_NO_MEMORY;
+ len = 0;
+ goto err;
+ }
+ vrh->hena = i40e_pf_get_default_rss_hena(pf);
+err:
+ /* send the response back to the VF */
+ aq_ret = i40e_vc_send_msg_to_vf(vf, I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS,
+ aq_ret, (u8 *)vrh, len);
+ return aq_ret;
+}
+
+/**
+ * i40e_vc_set_rss_hena
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ * @msglen: msg length
+ *
+ * Set the RSS HENA bits for the VF
+ **/
+static int i40e_vc_set_rss_hena(struct i40e_vf *vf, u8 *msg, u16 msglen)
+{
+ struct i40e_virtchnl_rss_hena *vrh =
+ (struct i40e_virtchnl_rss_hena *)msg;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status aq_ret = 0;
+
+ if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+ i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)vrh->hena);
+ i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_id),
+ (u32)(vrh->hena >> 32));
+
+ /* send the response to the VF */
+err:
+ return i40e_vc_send_resp_to_vf(vf, I40E_VIRTCHNL_OP_SET_RSS_HENA,
+ aq_ret);
+}
+
+/**
* i40e_vc_validate_vf_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -2041,7 +2360,7 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen)
{
bool err_msg_format = false;
- int valid_len;
+ int valid_len = 0;
/* Check if VF is disabled. */
if (test_bit(I40E_VF_STAT_DISABLED, &vf->vf_states))
@@ -2053,13 +2372,10 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
valid_len = sizeof(struct i40e_virtchnl_version_info);
break;
case I40E_VIRTCHNL_OP_RESET_VF:
- valid_len = 0;
break;
case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
if (VF_IS_V11(vf))
valid_len = sizeof(u32);
- else
- valid_len = 0;
break;
case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
valid_len = sizeof(struct i40e_virtchnl_txq_info);
@@ -2149,6 +2465,35 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
sizeof(struct i40e_virtchnl_iwarp_qv_info));
}
break;
+ case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
+ valid_len = sizeof(struct i40e_virtchnl_rss_key);
+ if (msglen >= valid_len) {
+ struct i40e_virtchnl_rss_key *vrk =
+ (struct i40e_virtchnl_rss_key *)msg;
+ if (vrk->key_len != I40E_HKEY_ARRAY_SIZE) {
+ err_msg_format = true;
+ break;
+ }
+ valid_len += vrk->key_len - 1;
+ }
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
+ valid_len = sizeof(struct i40e_virtchnl_rss_lut);
+ if (msglen >= valid_len) {
+ struct i40e_virtchnl_rss_lut *vrl =
+ (struct i40e_virtchnl_rss_lut *)msg;
+ if (vrl->lut_entries != I40E_VF_HLUT_ARRAY_SIZE) {
+ err_msg_format = true;
+ break;
+ }
+ valid_len += vrl->lut_entries - 1;
+ }
+ break;
+ case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+ break;
+ case I40E_VIRTCHNL_OP_SET_RSS_HENA:
+ valid_len = sizeof(struct i40e_virtchnl_rss_hena);
+ break;
/* These are always errors coming from the VF. */
case I40E_VIRTCHNL_OP_EVENT:
case I40E_VIRTCHNL_OP_UNKNOWN:
@@ -2175,11 +2520,11 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
* called from the common aeq/arq handler to
* process request from VF
**/
-int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
+int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen)
{
struct i40e_hw *hw = &pf->hw;
- unsigned int local_vf_id = vf_id - hw->func_caps.vf_base_id;
+ int local_vf_id = vf_id - (s16)hw->func_caps.vf_base_id;
struct i40e_vf *vf;
int ret;
@@ -2247,6 +2592,19 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
case I40E_VIRTCHNL_OP_RELEASE_IWARP_IRQ_MAP:
ret = i40e_vc_iwarp_qvmap_msg(vf, msg, msglen, false);
break;
+ case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
+ ret = i40e_vc_config_rss_key(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
+ ret = i40e_vc_config_rss_lut(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+ ret = i40e_vc_get_rss_hena(vf, msg, msglen);
+ break;
+ case I40E_VIRTCHNL_OP_SET_RSS_HENA:
+ ret = i40e_vc_set_rss_hena(vf, msg, msglen);
+ break;
+
case I40E_VIRTCHNL_OP_UNKNOWN:
default:
dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
@@ -2268,9 +2626,10 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
**/
int i40e_vc_process_vflr_event(struct i40e_pf *pf)
{
- u32 reg, reg_idx, bit_idx, vf_id;
struct i40e_hw *hw = &pf->hw;
+ u32 reg, reg_idx, bit_idx;
struct i40e_vf *vf;
+ int vf_id;
if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
return 0;
@@ -2292,13 +2651,9 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
/* read GLGEN_VFLRSTAT register to find out the flr VFs */
vf = &pf->vf[vf_id];
reg = rd32(hw, I40E_GLGEN_VFLRSTAT(reg_idx));
- if (reg & BIT(bit_idx)) {
- /* clear the bit in GLGEN_VFLRSTAT */
- wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
-
- if (!test_bit(__I40E_DOWN, &pf->state))
- i40e_reset_vf(vf, true);
- }
+ if (reg & BIT(bit_idx))
+ /* i40e_reset_vf will clear the bit in GLGEN_VFLRSTAT */
+ i40e_reset_vf(vf, true);
}
return 0;
@@ -2762,3 +3117,45 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
out:
return ret;
}
+
+/**
+ * i40e_ndo_set_vf_trust
+ * @netdev: network interface device structure of the pf
+ * @vf_id: VF identifier
+ * @setting: trust setting
+ *
+ * Enable or disable VF trust setting
+ **/
+int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_vf *vf;
+ int ret = 0;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+ return -EINVAL;
+ }
+
+ if (pf->flags & I40E_FLAG_MFP_ENABLED) {
+ dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n");
+ return -EINVAL;
+ }
+
+ vf = &pf->vf[vf_id];
+
+ if (!vf)
+ return -EINVAL;
+ if (setting == vf->trusted)
+ goto out;
+
+ vf->trusted = setting;
+ i40e_vc_notify_vf_reset(vf);
+ i40e_reset_vf(vf, false);
+ dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
+ vf_id, setting ? "" : "un");
+out:
+ return ret;
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index e7b2fba0309e..875174141451 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -61,6 +61,8 @@ enum i40e_vf_states {
I40E_VF_STAT_IWARPENA,
I40E_VF_STAT_FCOEENA,
I40E_VF_STAT_DISABLED,
+ I40E_VF_STAT_MC_PROMISC,
+ I40E_VF_STAT_UC_PROMISC,
};
/* VF capabilities */
@@ -75,7 +77,7 @@ struct i40e_vf {
struct i40e_pf *pf;
/* VF id in the PF space */
- u16 vf_id;
+ s16 vf_id;
/* all VF vsis connect to the same parent */
enum i40e_switch_element_types parent_type;
struct i40e_virtchnl_version_info vf_ver;
@@ -88,6 +90,7 @@ struct i40e_vf {
struct i40e_virtchnl_ether_addr default_fcoe_addr;
u16 port_vlan_id;
bool pf_set_mac; /* The VMM admin set the VF MAC address */
+ bool trusted;
/* VSI indices - actual VSI pointers are maintained in the PF structure
* When assigned, these will be non-zero, because VSI 0 is always
@@ -108,6 +111,9 @@ struct i40e_vf {
bool link_forced;
bool link_up; /* only valid if VF link is forced */
bool spoofchk;
+ u16 num_mac;
+ u16 num_vlan;
+
/* RDMA Client */
struct i40e_virtchnl_iwarp_qvlist_info *qvlist_info;
};
@@ -115,7 +121,7 @@ struct i40e_vf {
void i40e_free_vfs(struct i40e_pf *pf);
int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs);
-int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
+int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen);
int i40e_vc_process_vflr_event(struct i40e_pf *pf);
void i40e_reset_vf(struct i40e_vf *vf, bool flr);
@@ -127,6 +133,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
int vf_id, u16 vlan_id, u8 qos);
int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
int max_tx_rate);
+int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting);
int i40e_ndo_get_vf_config(struct net_device *netdev,
int vf_id, struct ifla_vf_info *ivi);
int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
index a3eae5d9a2bd..1f9b3b5d946d 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
@@ -97,7 +97,6 @@ struct i40e_adminq_info {
u32 fw_build; /* firmware build number */
u16 api_maj_ver; /* api major version */
u16 api_min_ver; /* api minor version */
- bool nvm_release_on_done;
struct mutex asq_mutex; /* Send queue lock */
struct mutex arq_mutex; /* Receive queue lock */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index aad8d6277110..3114dcfa1724 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -78,17 +78,17 @@ struct i40e_aq_desc {
#define I40E_AQ_FLAG_EI_SHIFT 14
#define I40E_AQ_FLAG_FE_SHIFT 15
-#define I40E_AQ_FLAG_DD (1 << I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
-#define I40E_AQ_FLAG_CMP (1 << I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
-#define I40E_AQ_FLAG_ERR (1 << I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
-#define I40E_AQ_FLAG_VFE (1 << I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
-#define I40E_AQ_FLAG_LB (1 << I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
-#define I40E_AQ_FLAG_RD (1 << I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
-#define I40E_AQ_FLAG_VFC (1 << I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
-#define I40E_AQ_FLAG_BUF (1 << I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
-#define I40E_AQ_FLAG_SI (1 << I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
-#define I40E_AQ_FLAG_EI (1 << I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
-#define I40E_AQ_FLAG_FE (1 << I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
+#define I40E_AQ_FLAG_DD BIT(I40E_AQ_FLAG_DD_SHIFT) /* 0x1 */
+#define I40E_AQ_FLAG_CMP BIT(I40E_AQ_FLAG_CMP_SHIFT) /* 0x2 */
+#define I40E_AQ_FLAG_ERR BIT(I40E_AQ_FLAG_ERR_SHIFT) /* 0x4 */
+#define I40E_AQ_FLAG_VFE BIT(I40E_AQ_FLAG_VFE_SHIFT) /* 0x8 */
+#define I40E_AQ_FLAG_LB BIT(I40E_AQ_FLAG_LB_SHIFT) /* 0x200 */
+#define I40E_AQ_FLAG_RD BIT(I40E_AQ_FLAG_RD_SHIFT) /* 0x400 */
+#define I40E_AQ_FLAG_VFC BIT(I40E_AQ_FLAG_VFC_SHIFT) /* 0x800 */
+#define I40E_AQ_FLAG_BUF BIT(I40E_AQ_FLAG_BUF_SHIFT) /* 0x1000 */
+#define I40E_AQ_FLAG_SI BIT(I40E_AQ_FLAG_SI_SHIFT) /* 0x2000 */
+#define I40E_AQ_FLAG_EI BIT(I40E_AQ_FLAG_EI_SHIFT) /* 0x4000 */
+#define I40E_AQ_FLAG_FE BIT(I40E_AQ_FLAG_FE_SHIFT) /* 0x8000 */
/* error codes */
enum i40e_admin_queue_err {
@@ -205,10 +205,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_resume_port_tx = 0x041C,
i40e_aqc_opc_configure_partition_bw = 0x041D,
- /* hmc */
- i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
- i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
-
/* phy commands*/
i40e_aqc_opc_get_phy_abilities = 0x0600,
i40e_aqc_opc_set_phy_config = 0x0601,
@@ -426,6 +422,7 @@ struct i40e_aqc_list_capabilities_element_resp {
#define I40E_AQ_CAP_ID_SDP 0x0062
#define I40E_AQ_CAP_ID_MDIO 0x0063
#define I40E_AQ_CAP_ID_WSR_PROT 0x0064
+#define I40E_AQ_CAP_ID_NVM_MGMT 0x0080
#define I40E_AQ_CAP_ID_FLEX10 0x00F1
#define I40E_AQ_CAP_ID_CEM 0x00F2
@@ -1582,27 +1579,6 @@ struct i40e_aqc_configure_partition_bw_data {
I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data);
-/* Get and set the active HMC resource profile and status.
- * (direct 0x0500) and (direct 0x0501)
- */
-struct i40e_aq_get_set_hmc_resource_profile {
- u8 pm_profile;
- u8 pe_vf_enabled;
- u8 reserved[14];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
-
-enum i40e_aq_hmc_profile {
- /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
- I40E_HMC_PROFILE_DEFAULT = 1,
- I40E_HMC_PROFILE_FAVOR_VF = 2,
- I40E_HMC_PROFILE_EQUAL = 3,
-};
-
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F
-
/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
/* set in param0 for get phy abilities to report qualified modules */
@@ -1649,11 +1625,11 @@ enum i40e_aq_phy_type {
enum i40e_aq_link_speed {
I40E_LINK_SPEED_UNKNOWN = 0,
- I40E_LINK_SPEED_100MB = (1 << I40E_LINK_SPEED_100MB_SHIFT),
- I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT),
- I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT),
- I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT),
- I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT)
+ I40E_LINK_SPEED_100MB = BIT(I40E_LINK_SPEED_100MB_SHIFT),
+ I40E_LINK_SPEED_1GB = BIT(I40E_LINK_SPEED_1000MB_SHIFT),
+ I40E_LINK_SPEED_10GB = BIT(I40E_LINK_SPEED_10GB_SHIFT),
+ I40E_LINK_SPEED_40GB = BIT(I40E_LINK_SPEED_40GB_SHIFT),
+ I40E_LINK_SPEED_20GB = BIT(I40E_LINK_SPEED_20GB_SHIFT)
};
struct i40e_aqc_module_desc {
@@ -1924,9 +1900,9 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
/* Used for 0x0704 as well as for 0x0705 commands */
#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1
#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \
- (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
+ BIT(I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT)
#define I40E_AQ_ANVM_FEATURE 0
-#define I40E_AQ_ANVM_IMMEDIATE_FIELD (1 << FEATURE_OR_IMMEDIATE_SHIFT)
+#define I40E_AQ_ANVM_IMMEDIATE_FIELD BIT(FEATURE_OR_IMMEDIATE_SHIFT)
struct i40e_aqc_nvm_config_data_feature {
__le16 feature_id;
#define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01
@@ -2195,7 +2171,7 @@ struct i40e_aqc_del_udp_tunnel_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
struct i40e_aqc_get_set_rss_key {
-#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_KEY_VSI_VALID BIT(15)
#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
@@ -2215,14 +2191,14 @@ struct i40e_aqc_get_set_rss_key_data {
I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
struct i40e_aqc_get_set_rss_lut {
-#define I40E_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID BIT(15)
#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
__le16 vsi_id;
#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
-#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \
- I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK \
+ BIT(I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index 771ac6ad8cda..4db0c0326185 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -58,6 +58,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_SFP_X722:
case I40E_DEV_ID_1G_BASE_T_X722:
case I40E_DEV_ID_10G_BASE_T_X722:
+ case I40E_DEV_ID_SFP_I_X722:
hw->mac.type = I40E_MAC_X722;
break;
case I40E_DEV_ID_X722_VF:
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_devids.h b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
index ca8b58c3d1f5..70235706915e 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_devids.h
@@ -44,6 +44,7 @@
#define I40E_DEV_ID_SFP_X722 0x37D0
#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
+#define I40E_DEV_ID_SFP_I_X722 0x37D3
#define I40E_DEV_ID_X722_VF 0x37CD
#define I40E_DEV_ID_X722_VF_HV 0x37D9
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index cea97daa844c..a579193b2c21 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -155,19 +155,21 @@ u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw)
/**
* i40e_clean_tx_irq - Reclaim resources after transmit completes
- * @tx_ring: tx ring to clean
- * @budget: how many cleans we're allowed
+ * @vsi: the VSI we care about
+ * @tx_ring: Tx ring to clean
+ * @napi_budget: Used to determine if we are in netpoll
*
* Returns true if there's any budget left (i.e. the clean is finished)
**/
-static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
+static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
+ struct i40e_ring *tx_ring, int napi_budget)
{
u16 i = tx_ring->next_to_clean;
struct i40e_tx_buffer *tx_buf;
struct i40e_tx_desc *tx_head;
struct i40e_tx_desc *tx_desc;
- unsigned int total_packets = 0;
- unsigned int total_bytes = 0;
+ unsigned int total_bytes = 0, total_packets = 0;
+ unsigned int budget = vsi->work_limit;
tx_buf = &tx_ring->tx_bi[i];
tx_desc = I40E_TX_DESC(tx_ring, i);
@@ -197,7 +199,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
total_packets += tx_buf->gso_segs;
/* free the skb */
- dev_kfree_skb_any(tx_buf->skb);
+ napi_consume_skb(tx_buf->skb, napi_budget);
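+ /* napi_consume_skb() batches frees when called from NAPI; a
+ * zero napi_budget (netpoll) makes it fall back to an
+ * immediate free
+ */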
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -257,17 +259,16 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
tx_ring->q_vector->tx.total_packets += total_packets;
if (tx_ring->flags & I40E_TXR_FLAGS_WB_ON_ITR) {
- unsigned int j = 0;
/* check to see if there are < 4 descriptors
* waiting to be written back, then kick the hardware to force
* them to be written back in case we stay in NAPI.
* In this mode on X722 we do not enable Interrupt.
*/
- j = i40evf_get_tx_pending(tx_ring, false);
+ unsigned int j = i40evf_get_tx_pending(tx_ring, false);
if (budget &&
((j / (WB_STRIDE + 1)) == 0) && (j > 0) &&
- !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
+ !test_bit(__I40E_DOWN, &vsi->state) &&
(I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
tx_ring->arm_wb = true;
}
@@ -285,7 +286,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
smp_mb();
if (__netif_subqueue_stopped(tx_ring->netdev,
tx_ring->queue_index) &&
- !test_bit(__I40E_DOWN, &tx_ring->vsi->state)) {
+ !test_bit(__I40E_DOWN, &vsi->state)) {
netif_wake_subqueue(tx_ring->netdev,
tx_ring->queue_index);
++tx_ring->tx_stats.restart_queue;
@@ -494,7 +495,6 @@ err:
void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
- struct i40e_rx_buffer *rx_bi;
unsigned long bi_size;
u16 i;
@@ -502,48 +502,22 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
if (!rx_ring->rx_bi)
return;
- if (ring_is_ps_enabled(rx_ring)) {
- int bufsz = ALIGN(rx_ring->rx_hdr_len, 256) * rx_ring->count;
-
- rx_bi = &rx_ring->rx_bi[0];
- if (rx_bi->hdr_buf) {
- dma_free_coherent(dev,
- bufsz,
- rx_bi->hdr_buf,
- rx_bi->dma);
- for (i = 0; i < rx_ring->count; i++) {
- rx_bi = &rx_ring->rx_bi[i];
- rx_bi->dma = 0;
- rx_bi->hdr_buf = NULL;
- }
- }
- }
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
- rx_bi = &rx_ring->rx_bi[i];
- if (rx_bi->dma) {
- dma_unmap_single(dev,
- rx_bi->dma,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- rx_bi->dma = 0;
- }
+ struct i40e_rx_buffer *rx_bi = &rx_ring->rx_bi[i];
+
if (rx_bi->skb) {
dev_kfree_skb(rx_bi->skb);
rx_bi->skb = NULL;
}
- if (rx_bi->page) {
- if (rx_bi->page_dma) {
- dma_unmap_page(dev,
- rx_bi->page_dma,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
- rx_bi->page_dma = 0;
- }
- __free_page(rx_bi->page);
- rx_bi->page = NULL;
- rx_bi->page_offset = 0;
- }
+ if (!rx_bi->page)
+ continue;
+
+ dma_unmap_page(dev, rx_bi->dma, PAGE_SIZE, DMA_FROM_DEVICE);
+ __free_pages(rx_bi->page, 0);
+
+ rx_bi->page = NULL;
+ rx_bi->page_offset = 0;
}
bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count;
@@ -552,6 +526,7 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
/* Zero out the descriptor ring */
memset(rx_ring->desc, 0, rx_ring->size);
+ rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
}
@@ -576,37 +551,6 @@ void i40evf_free_rx_resources(struct i40e_ring *rx_ring)
}
/**
- * i40evf_alloc_rx_headers - allocate rx header buffers
- * @rx_ring: ring to alloc buffers
- *
- * Allocate rx header buffers for the entire ring. As these are static,
- * this is only called when setting up a new ring.
- **/
-void i40evf_alloc_rx_headers(struct i40e_ring *rx_ring)
-{
- struct device *dev = rx_ring->dev;
- struct i40e_rx_buffer *rx_bi;
- dma_addr_t dma;
- void *buffer;
- int buf_size;
- int i;
-
- if (rx_ring->rx_bi[0].hdr_buf)
- return;
- /* Make sure the buffers don't cross cache line boundaries. */
- buf_size = ALIGN(rx_ring->rx_hdr_len, 256);
- buffer = dma_alloc_coherent(dev, buf_size * rx_ring->count,
- &dma, GFP_KERNEL);
- if (!buffer)
- return;
- for (i = 0; i < rx_ring->count; i++) {
- rx_bi = &rx_ring->rx_bi[i];
- rx_bi->dma = dma + (i * buf_size);
- rx_bi->hdr_buf = buffer + (i * buf_size);
- }
-}
-
-/**
* i40evf_setup_rx_descriptors - Allocate Rx descriptors
* @rx_ring: Rx descriptor ring (for a specific queue) to setup
*
@@ -627,9 +571,7 @@ int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring)
u64_stats_init(&rx_ring->syncp);
/* Round up to nearest 4K */
- rx_ring->size = ring_is_16byte_desc_enabled(rx_ring)
- ? rx_ring->count * sizeof(union i40e_16byte_rx_desc)
- : rx_ring->count * sizeof(union i40e_32byte_rx_desc);
+ rx_ring->size = rx_ring->count * sizeof(union i40e_32byte_rx_desc);
rx_ring->size = ALIGN(rx_ring->size, 4096);
rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL);
@@ -640,6 +582,7 @@ int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring)
goto err;
}
+ rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
@@ -658,6 +601,10 @@ err:
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
rx_ring->next_to_use = val;
+
+ /* update next to alloc since we have filled the ring */
+ rx_ring->next_to_alloc = val;
+
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
@@ -668,160 +615,122 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
}
/**
- * i40evf_alloc_rx_buffers_ps - Replace used receive buffers; packet split
- * @rx_ring: ring to place buffers on
- * @cleaned_count: number of buffers to replace
+ * i40e_alloc_mapped_page - recycle or make a new page
+ * @rx_ring: ring to use
+ * @bi: rx_buffer struct to modify
*
- * Returns true if any errors on allocation
+ * Returns true if the page was successfully allocated or
+ * reused.
**/
-bool i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
+static bool i40e_alloc_mapped_page(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *bi)
{
- u16 i = rx_ring->next_to_use;
- union i40e_rx_desc *rx_desc;
- struct i40e_rx_buffer *bi;
- const int current_node = numa_node_id();
+ struct page *page = bi->page;
+ dma_addr_t dma;
- /* do nothing if no valid netdev defined */
- if (!rx_ring->netdev || !cleaned_count)
- return false;
+ /* since we are recycling buffers we should seldom need to alloc */
+ if (likely(page)) {
+ rx_ring->rx_stats.page_reuse_count++;
+ return true;
+ }
- while (cleaned_count--) {
- rx_desc = I40E_RX_DESC(rx_ring, i);
- bi = &rx_ring->rx_bi[i];
+ /* alloc new page for storage */
+ page = dev_alloc_page();
+ if (unlikely(!page)) {
+ rx_ring->rx_stats.alloc_page_failed++;
+ return false;
+ }
- if (bi->skb) /* desc is in use */
- goto no_buffers;
+ /* map page for use */
+ dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
- /* If we've been moved to a different NUMA node, release the
- * page so we can get a new one on the current node.
+ /* if mapping failed free memory back to system since
+ * there isn't much point in holding memory we can't use
*/
- if (bi->page && page_to_nid(bi->page) != current_node) {
- dma_unmap_page(rx_ring->dev,
- bi->page_dma,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
- __free_page(bi->page);
- bi->page = NULL;
- bi->page_dma = 0;
- rx_ring->rx_stats.realloc_count++;
- } else if (bi->page) {
- rx_ring->rx_stats.page_reuse_count++;
- }
-
- if (!bi->page) {
- bi->page = alloc_page(GFP_ATOMIC);
- if (!bi->page) {
- rx_ring->rx_stats.alloc_page_failed++;
- goto no_buffers;
- }
- bi->page_dma = dma_map_page(rx_ring->dev,
- bi->page,
- 0,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(rx_ring->dev, bi->page_dma)) {
- rx_ring->rx_stats.alloc_page_failed++;
- __free_page(bi->page);
- bi->page = NULL;
- bi->page_dma = 0;
- bi->page_offset = 0;
- goto no_buffers;
- }
- bi->page_offset = 0;
- }
-
- /* Refresh the desc even if buffer_addrs didn't change
- * because each write-back erases this info.
- */
- rx_desc->read.pkt_addr =
- cpu_to_le64(bi->page_dma + bi->page_offset);
- rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
- i++;
- if (i == rx_ring->count)
- i = 0;
+ if (dma_mapping_error(rx_ring->dev, dma)) {
+ __free_pages(page, 0);
+ rx_ring->rx_stats.alloc_page_failed++;
+ return false;
}
- if (rx_ring->next_to_use != i)
- i40e_release_rx_desc(rx_ring, i);
+ bi->dma = dma;
+ bi->page = page;
+ bi->page_offset = 0;
- return false;
+ return true;
+}
-no_buffers:
- if (rx_ring->next_to_use != i)
- i40e_release_rx_desc(rx_ring, i);
+/**
+ * i40e_receive_skb - Send a completed packet up the stack
+ * @rx_ring: rx ring in play
+ * @skb: packet to send up
+ * @vlan_tag: vlan tag for packet
+ **/
+static void i40e_receive_skb(struct i40e_ring *rx_ring,
+ struct sk_buff *skb, u16 vlan_tag)
+{
+ struct i40e_q_vector *q_vector = rx_ring->q_vector;
- /* make sure to come back via polling to try again after
- * allocation failure
- */
- return true;
+ if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (vlan_tag & VLAN_VID_MASK))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
+
+ napi_gro_receive(&q_vector->napi, skb);
}
/**
- * i40evf_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
+ * i40evf_alloc_rx_buffers - Replace used receive buffers
* @rx_ring: ring to place buffers on
* @cleaned_count: number of buffers to replace
*
- * Returns true if any errors on allocation
+ * Returns false if all allocations were successful, true if any fail
**/
-bool i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
+bool i40evf_alloc_rx_buffers(struct i40e_ring *rx_ring, u16 cleaned_count)
{
- u16 i = rx_ring->next_to_use;
+ u16 ntu = rx_ring->next_to_use;
union i40e_rx_desc *rx_desc;
struct i40e_rx_buffer *bi;
- struct sk_buff *skb;
/* do nothing if no valid netdev defined */
if (!rx_ring->netdev || !cleaned_count)
return false;
- while (cleaned_count--) {
- rx_desc = I40E_RX_DESC(rx_ring, i);
- bi = &rx_ring->rx_bi[i];
- skb = bi->skb;
-
- if (!skb) {
- skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
- rx_ring->rx_buf_len,
- GFP_ATOMIC |
- __GFP_NOWARN);
- if (!skb) {
- rx_ring->rx_stats.alloc_buff_failed++;
- goto no_buffers;
- }
- /* initialize queue mapping */
- skb_record_rx_queue(skb, rx_ring->queue_index);
- bi->skb = skb;
- }
+ rx_desc = I40E_RX_DESC(rx_ring, ntu);
+ bi = &rx_ring->rx_bi[ntu];
- if (!bi->dma) {
- bi->dma = dma_map_single(rx_ring->dev,
- skb->data,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(rx_ring->dev, bi->dma)) {
- rx_ring->rx_stats.alloc_buff_failed++;
- bi->dma = 0;
- dev_kfree_skb(bi->skb);
- bi->skb = NULL;
- goto no_buffers;
- }
- }
+ do {
+ if (!i40e_alloc_mapped_page(rx_ring, bi))
+ goto no_buffers;
- rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+ /* Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info.
+ */
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
rx_desc->read.hdr_addr = 0;
- i++;
- if (i == rx_ring->count)
- i = 0;
- }
- if (rx_ring->next_to_use != i)
- i40e_release_rx_desc(rx_ring, i);
+ rx_desc++;
+ bi++;
+ ntu++;
+ if (unlikely(ntu == rx_ring->count)) {
+ rx_desc = I40E_RX_DESC(rx_ring, 0);
+ bi = rx_ring->rx_bi;
+ ntu = 0;
+ }
+
+ /* clear the status bits for the next_to_use descriptor */
+ rx_desc->wb.qword1.status_error_len = 0;
+
+ cleaned_count--;
+ } while (cleaned_count);
+
+ if (rx_ring->next_to_use != ntu)
+ i40e_release_rx_desc(rx_ring, ntu);
return false;
no_buffers:
- if (rx_ring->next_to_use != i)
- i40e_release_rx_desc(rx_ring, i);
+ if (rx_ring->next_to_use != ntu)
+ i40e_release_rx_desc(rx_ring, ntu);
/* make sure to come back via polling to try again after
* allocation failure
@@ -830,41 +739,35 @@ no_buffers:
}
/**
- * i40e_receive_skb - Send a completed packet up the stack
- * @rx_ring: rx ring in play
- * @skb: packet to send up
- * @vlan_tag: vlan tag for packet
- **/
-static void i40e_receive_skb(struct i40e_ring *rx_ring,
- struct sk_buff *skb, u16 vlan_tag)
-{
- struct i40e_q_vector *q_vector = rx_ring->q_vector;
-
- if (vlan_tag & VLAN_VID_MASK)
- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
-
- napi_gro_receive(&q_vector->napi, skb);
-}
-
-/**
* i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
* @vsi: the VSI we care about
* @skb: skb currently being received and modified
- * @rx_status: status value of last descriptor in packet
- * @rx_error: error value of last descriptor in packet
- * @rx_ptype: ptype value of last descriptor in packet
+ * @rx_desc: the receive descriptor
+ *
+ * skb->protocol must be set before this function is called
**/
static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
struct sk_buff *skb,
- u32 rx_status,
- u32 rx_error,
- u16 rx_ptype)
+ union i40e_rx_desc *rx_desc)
{
- struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
- bool ipv4, ipv6, ipv4_tunnel, ipv6_tunnel;
+ struct i40e_rx_ptype_decoded decoded;
+ u32 rx_error, rx_status;
+ bool ipv4, ipv6;
+ u8 ptype;
+ u64 qword;
+
+ qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
+ rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
+ I40E_RXD_QW1_ERROR_SHIFT;
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
+ decoded = decode_rx_desc_ptype(ptype);
skb->ip_summed = CHECKSUM_NONE;
+ skb_checksum_none_assert(skb);
+
/* Rx csum enabled and ip headers found? */
if (!(vsi->netdev->features & NETIF_F_RXCSUM))
return;
@@ -904,20 +807,23 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
if (rx_error & BIT(I40E_RX_DESC_ERROR_PPRS_SHIFT))
return;
- /* The hardware supported by this driver does not validate outer
- * checksums for tunneled VXLAN or GENEVE frames. I don't agree
- * with it but the specification states that you "MAY validate", it
- * doesn't make it a hard requirement so if we have validated the
- * inner checksum report CHECKSUM_UNNECESSARY.
+ /* If there is an outer header present that might contain a checksum
+ * we need to bump the checksum level by 1 to reflect the fact that
+ * we are indicating we validated the inner checksum.
*/
-
- ipv4_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
- (rx_ptype <= I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
- ipv6_tunnel = (rx_ptype >= I40E_RX_PTYPE_GRENAT6_MAC_PAY3) &&
- (rx_ptype <= I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4);
-
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- skb->csum_level = ipv4_tunnel || ipv6_tunnel;
+ if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT)
+ skb->csum_level = 1;
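+ /* csum_level = 1 tells the stack that the checksum we validated
+ * is the inner one, a single encapsulation layer down
+ */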
+
+ /* Only report checksum unnecessary for TCP, UDP, or SCTP */
+ switch (decoded.inner_prot) {
+ case I40E_RX_PTYPE_INNER_PROT_TCP:
+ case I40E_RX_PTYPE_INNER_PROT_UDP:
+ case I40E_RX_PTYPE_INNER_PROT_SCTP:
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ /* fall through */
+ default:
+ break;
+ }
return;
@@ -931,7 +837,7 @@ checksum_fail:
*
* Returns a hash type to be used by skb_set_hash
**/
-static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype)
+static inline int i40e_ptype_to_htype(u8 ptype)
{
struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
@@ -959,7 +865,7 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
u8 rx_ptype)
{
u32 hash;
- const __le64 rss_mask =
- const __le64 rss_mask =
+ const __le64 rss_mask =
cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
@@ -973,313 +879,411 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
}
/**
- * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
- * @rx_ring: rx ring to clean
- * @budget: how many cleans we're allowed
+ * i40evf_process_skb_fields - Populate skb header fields from Rx descriptor
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_desc: pointer to the EOP Rx descriptor
+ * @skb: pointer to current skb being populated
+ * @rx_ptype: the packet type decoded by hardware
*
- * Returns true if there's any budget left (e.g. the clean is finished)
+ * This function checks the ring, descriptor, and packet information in
+ * order to populate the hash, checksum, VLAN, protocol, and
+ * other fields within the skb.
**/
-static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget)
+static inline
+void i40evf_process_skb_fields(struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc, struct sk_buff *skb,
+ u8 rx_ptype)
{
- unsigned int total_rx_bytes = 0, total_rx_packets = 0;
- u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
- u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
- struct i40e_vsi *vsi = rx_ring->vsi;
- u16 i = rx_ring->next_to_clean;
- union i40e_rx_desc *rx_desc;
- u32 rx_error, rx_status;
- bool failure = false;
- u8 rx_ptype;
- u64 qword;
- u32 copysize;
+ i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
- do {
- struct i40e_rx_buffer *rx_bi;
- struct sk_buff *skb;
- u16 vlan_tag;
- /* return some buffers to hardware, one at a time is too slow */
- if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
- failure = failure ||
- i40evf_alloc_rx_buffers_ps(rx_ring,
- cleaned_count);
- cleaned_count = 0;
- }
+ /* modifies the skb - consumes the enet header */
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
- i = rx_ring->next_to_clean;
- rx_desc = I40E_RX_DESC(rx_ring, i);
- qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
- I40E_RXD_QW1_STATUS_SHIFT;
+ i40e_rx_checksum(rx_ring->vsi, skb, rx_desc);
- if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
- break;
+ skb_record_rx_queue(skb, rx_ring->queue_index);
+}
- /* This memory barrier is needed to keep us from reading
- * any other fields out of the rx_desc until we know the
- * DD bit is set.
- */
- dma_rmb();
- /* sync header buffer for reading */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_ring->rx_bi[0].dma,
- i * rx_ring->rx_hdr_len,
- rx_ring->rx_hdr_len,
- DMA_FROM_DEVICE);
- rx_bi = &rx_ring->rx_bi[i];
- skb = rx_bi->skb;
- if (likely(!skb)) {
- skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
- rx_ring->rx_hdr_len,
- GFP_ATOMIC |
- __GFP_NOWARN);
- if (!skb) {
- rx_ring->rx_stats.alloc_buff_failed++;
- failure = true;
- break;
- }
+/**
+ * i40e_pull_tail - i40e specific version of skb_pull_tail
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being adjusted
+ *
+ * This function is an i40e specific version of __pskb_pull_tail. The
+ * main difference between this version and the original function is that
+ * this function can make several assumptions about the state of things
+ * that allow for significant optimizations versus the standard function.
+ * As a result we can do things like drop a frag and maintain an accurate
+ * truesize for the skb.
+ */
+static void i40e_pull_tail(struct i40e_ring *rx_ring, struct sk_buff *skb)
+{
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
+ unsigned char *va;
+ unsigned int pull_len;
- /* initialize queue mapping */
- skb_record_rx_queue(skb, rx_ring->queue_index);
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_ring->rx_bi[0].dma,
- i * rx_ring->rx_hdr_len,
- rx_ring->rx_hdr_len,
- DMA_FROM_DEVICE);
- }
- rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
- rx_header_len = (qword & I40E_RXD_QW1_LENGTH_HBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_HBUF_SHIFT;
- rx_sph = (qword & I40E_RXD_QW1_LENGTH_SPH_MASK) >>
- I40E_RXD_QW1_LENGTH_SPH_SHIFT;
-
- rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
- I40E_RXD_QW1_ERROR_SHIFT;
- rx_hbo = rx_error & BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
- rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+ /* it is valid to use page_address instead of kmap since we are
+ * working with pages allocated out of the lowmem pool per
+ * alloc_page(GFP_ATOMIC)
+ */
+ va = skb_frag_address(frag);
- rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
- I40E_RXD_QW1_PTYPE_SHIFT;
- /* sync half-page for reading */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_bi->page_dma,
- rx_bi->page_offset,
- PAGE_SIZE / 2,
- DMA_FROM_DEVICE);
- prefetch(page_address(rx_bi->page) + rx_bi->page_offset);
- rx_bi->skb = NULL;
- cleaned_count++;
- copysize = 0;
- if (rx_hbo || rx_sph) {
- int len;
-
- if (rx_hbo)
- len = I40E_RX_HDR_SIZE;
- else
- len = rx_header_len;
- memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
- } else if (skb->len == 0) {
- int len;
- unsigned char *va = page_address(rx_bi->page) +
- rx_bi->page_offset;
-
- len = min(rx_packet_len, rx_ring->rx_hdr_len);
- memcpy(__skb_put(skb, len), va, len);
- copysize = len;
- rx_packet_len -= len;
- }
- /* Get the rest of the data if this was a header split */
- if (rx_packet_len) {
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- rx_bi->page,
- rx_bi->page_offset + copysize,
- rx_packet_len, I40E_RXBUFFER_2048);
-
- /* If the page count is more than 2, then both halves
- * of the page are used and we need to free it. Do it
- * here instead of in the alloc code. Otherwise one
- * of the half-pages might be released between now and
- * then, and we wouldn't know which one to use.
- * Don't call get_page and free_page since those are
- * both expensive atomic operations that just change
- * the refcount in opposite directions. Just give the
- * page to the stack; he can have our refcount.
- */
- if (page_count(rx_bi->page) > 2) {
- dma_unmap_page(rx_ring->dev,
- rx_bi->page_dma,
- PAGE_SIZE,
- DMA_FROM_DEVICE);
- rx_bi->page = NULL;
- rx_bi->page_dma = 0;
- rx_ring->rx_stats.realloc_count++;
- } else {
- get_page(rx_bi->page);
- /* switch to the other half-page here; the
- * allocation code programs the right addr
- * into HW. If we haven't used this half-page,
- * the address won't be changed, and HW can
- * just use it next time through.
- */
- rx_bi->page_offset ^= PAGE_SIZE / 2;
- }
+ /* we need the header to contain the greater of either ETH_HLEN or
+ * 60 bytes if the skb->len is less than 60 for skb_pad.
+ */
+ pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);
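+ /* eth_get_headlen() caps pull_len at I40E_RX_HDR_SIZE (256
+ * bytes), the linear area reserved in i40evf_fetch_rx_buffer()
+ */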
- }
- I40E_RX_INCREMENT(rx_ring, i);
+ /* align pull length to size of long to optimize memcpy performance */
+ skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long)));
- if (unlikely(
- !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
- struct i40e_rx_buffer *next_buffer;
+ /* update all of the pointers */
+ skb_frag_size_sub(frag, pull_len);
+ frag->page_offset += pull_len;
+ skb->data_len -= pull_len;
+ skb->tail += pull_len;
+}
- next_buffer = &rx_ring->rx_bi[i];
- next_buffer->skb = skb;
- rx_ring->rx_stats.non_eop_descs++;
- continue;
- }
+/**
+ * i40e_cleanup_headers - Correct empty headers
+ * @rx_ring: rx descriptor ring packet is being transacted on
+ * @skb: pointer to current skb being fixed
+ *
+ * Also address the case where we are pulling data in on pages only
+ * and as such no data is present in the skb header.
+ *
+ * In addition if skb is not at least 60 bytes we need to pad it so that
+ * it is large enough to qualify as a valid Ethernet frame.
+ *
+ * Returns true if an error was encountered and skb was freed.
+ **/
+static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb)
+{
+ /* place header in linear portion of buffer */
+ if (skb_is_nonlinear(skb))
+ i40e_pull_tail(rx_ring, skb);
- /* ERR_MASK will only have valid bits if EOP set */
- if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
- dev_kfree_skb_any(skb);
- continue;
- }
+ /* if eth_skb_pad returns an error the skb was freed */
+ if (eth_skb_pad(skb))
+ return true;
- i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
+ return false;
+}
- /* probably a little skewed due to removing CRC */
- total_rx_bytes += skb->len;
- total_rx_packets++;
+/**
+ * i40e_reuse_rx_page - page flip buffer and store it back on the ring
+ * @rx_ring: rx descriptor ring to store buffers on
+ * @old_buff: donor buffer to have page reused
+ *
+ * Synchronizes page for reuse by the adapter
+ **/
+static void i40e_reuse_rx_page(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *old_buff)
+{
+ struct i40e_rx_buffer *new_buff;
+ u16 nta = rx_ring->next_to_alloc;
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ new_buff = &rx_ring->rx_bi[nta];
- i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
+ /* update, and store next to alloc */
+ nta++;
+ rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
- vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
- ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
- : 0;
-#ifdef I40E_FCOE
- if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
- dev_kfree_skb_any(skb);
- continue;
- }
+ /* transfer page from old buffer to new buffer */
+ *new_buff = *old_buff;
+}
+
+/**
+ * i40e_page_is_reserved - check if reuse is possible
+ * @page: page struct to check
+ */
+static inline bool i40e_page_is_reserved(struct page *page)
+{
+ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
+}
+
+/**
+ * i40e_add_rx_frag - Add contents of Rx buffer to sk_buff
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_buffer: buffer containing page to add
+ * @rx_desc: descriptor containing length of buffer written by hardware
+ * @skb: sk_buff to place the data into
+ *
+ * This function will add the data contained in rx_buffer->page to the skb.
+ * This is done either through a direct copy if the data in the buffer is
+ * less than the skb header size, otherwise it will just attach the page as
+ * a frag to the skb.
+ *
+ * The function will then update the page offset if necessary and return
+ * true if the buffer can be reused by the adapter.
+ **/
+static bool i40e_add_rx_frag(struct i40e_ring *rx_ring,
+ struct i40e_rx_buffer *rx_buffer,
+ union i40e_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ struct page *page = rx_buffer->page;
+ u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ unsigned int size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
+ I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = I40E_RXBUFFER_2048;
+#else
+ unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+ unsigned int last_offset = PAGE_SIZE - I40E_RXBUFFER_2048;
#endif
- i40e_receive_skb(rx_ring, skb, vlan_tag);
- rx_desc->wb.qword1.status_error_len = 0;
+ /* will the data fit in the skb we allocated? if so, just
+ * copy it as it is pretty small anyway
+ */
+ if ((size <= I40E_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) {
+ unsigned char *va = page_address(page) + rx_buffer->page_offset;
- } while (likely(total_rx_packets < budget));
+ memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->stats.packets += total_rx_packets;
- rx_ring->stats.bytes += total_rx_bytes;
- u64_stats_update_end(&rx_ring->syncp);
- rx_ring->q_vector->rx.total_packets += total_rx_packets;
- rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+ /* page is not reserved, we can reuse buffer as-is */
+ if (likely(!i40e_page_is_reserved(page)))
+ return true;
- return failure ? budget : total_rx_packets;
+ /* this page cannot be reused so discard it */
+ __free_pages(page, 0);
+ return false;
+ }
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ rx_buffer->page_offset, size, truesize);
+
+ /* avoid re-using remote pages */
+ if (unlikely(i40e_page_is_reserved(page)))
+ return false;
+
+#if (PAGE_SIZE < 8192)
+ /* if we are only owner of page we can reuse it */
+ if (unlikely(page_count(page) != 1))
+ return false;
+
+ /* flip page offset to other buffer */
+ rx_buffer->page_offset ^= truesize;
+#else
+ /* move offset up to the next cache line */
+ rx_buffer->page_offset += truesize;
+
+ if (rx_buffer->page_offset > last_offset)
+ return false;
+#endif
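+
+ /* with 4K pages the offset simply toggles between the two 2K
+ * halves; with larger pages it advances until last_offset is
+ * passed
+ */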
+
+ /* Even if we own the page, we are not allowed to use atomic_set()
+ * This would break get_page_unless_zero() users.
+ */
+ get_page(rx_buffer->page);
+
+ return true;
}
/**
- * i40e_clean_rx_irq_1buf - Reclaim resources after receive; single buffer
- * @rx_ring: rx ring to clean
- * @budget: how many cleans we're allowed
+ * i40evf_fetch_rx_buffer - Allocate skb and populate it
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_desc: descriptor containing info written by hardware
*
- * Returns number of packets cleaned
+ * This function allocates an skb on the fly, and populates it with the page
+ * data from the current receive descriptor, taking care to set up the skb
+ * correctly, as well as calling the page recycle function if necessary.
+ */
+static inline
+struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc)
+{
+ struct i40e_rx_buffer *rx_buffer;
+ struct sk_buff *skb;
+ struct page *page;
+
+ rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];
+ page = rx_buffer->page;
+ prefetchw(page);
+
+ skb = rx_buffer->skb;
+
+ if (likely(!skb)) {
+ void *page_addr = page_address(page) + rx_buffer->page_offset;
+
+ /* prefetch first cache line of first page */
+ prefetch(page_addr);
+#if L1_CACHE_BYTES < 128
+ prefetch(page_addr + L1_CACHE_BYTES);
+#endif
+
+ /* allocate a skb to store the frags */
+ skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
+ I40E_RX_HDR_SIZE,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!skb)) {
+ rx_ring->rx_stats.alloc_buff_failed++;
+ return NULL;
+ }
+
+ /* we will be copying header into skb->data in
+ * pskb_may_pull so it is in our interest to prefetch
+ * it now to avoid a possible cache miss
+ */
+ prefetchw(skb->data);
+ } else {
+ rx_buffer->skb = NULL;
+ }
+
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ I40E_RXBUFFER_2048,
+ DMA_FROM_DEVICE);
+
+ /* pull page into skb */
+ if (i40e_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+ /* hand second half of page back to the ring */
+ i40e_reuse_rx_page(rx_ring, rx_buffer);
+ rx_ring->rx_stats.page_reuse_count++;
+ } else {
+ /* we are not reusing the buffer so unmap it */
+ dma_unmap_page(rx_ring->dev, rx_buffer->dma, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ }
+
+ /* clear contents of buffer_info */
+ rx_buffer->page = NULL;
+
+ return skb;
+}
+
+/**
+ * i40e_is_non_eop - process handling of non-EOP buffers
+ * @rx_ring: Rx ring being processed
+ * @rx_desc: Rx descriptor for current buffer
+ * @skb: Current socket buffer containing buffer in progress
+ *
+ * This function updates next to clean. If the buffer is an EOP buffer
+ * this function exits returning false, otherwise it will place the
+ * sk_buff in the next buffer to be chained and return true indicating
+ * that this is in fact a non-EOP buffer.
+ **/
+static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
+ union i40e_rx_desc *rx_desc,
+ struct sk_buff *skb)
+{
+ u32 ntc = rx_ring->next_to_clean + 1;
+
+ /* fetch, update, and store next to clean */
+ ntc = (ntc < rx_ring->count) ? ntc : 0;
+ rx_ring->next_to_clean = ntc;
+
+ prefetch(I40E_RX_DESC(rx_ring, ntc));
+
+ /* if we are the last buffer then there is nothing else to do */
+#define I40E_RXD_EOF BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)
+ if (likely(i40e_test_staterr(rx_desc, I40E_RXD_EOF)))
+ return false;
+
+ /* place skb in next buffer to be received */
+ rx_ring->rx_bi[ntc].skb = skb;
+ rx_ring->rx_stats.non_eop_descs++;
+
+ return true;
+}
+
+/**
+ * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
+ * @rx_ring: rx descriptor ring to transact packets on
+ * @budget: Total limit on number of packets to process
+ *
+ * This function provides a "bounce buffer" approach to Rx interrupt
+ * processing. The advantage to this is that on systems that have
+ * expensive overhead for IOMMU access this provides a means of avoiding
+ * it by maintaining the mapping of the page to the system.
+ *
+ * Returns amount of work completed
**/
-static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
+static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
- struct i40e_vsi *vsi = rx_ring->vsi;
- union i40e_rx_desc *rx_desc;
- u32 rx_error, rx_status;
- u16 rx_packet_len;
bool failure = false;
- u8 rx_ptype;
- u64 qword;
- u16 i;
- do {
- struct i40e_rx_buffer *rx_bi;
+ while (likely(total_rx_packets < budget)) {
+ union i40e_rx_desc *rx_desc;
struct sk_buff *skb;
+ u32 rx_status;
u16 vlan_tag;
+ u8 rx_ptype;
+ u64 qword;
+
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
failure = failure ||
- i40evf_alloc_rx_buffers_1buf(rx_ring,
- cleaned_count);
+ i40evf_alloc_rx_buffers(rx_ring, cleaned_count);
cleaned_count = 0;
}
- i = rx_ring->next_to_clean;
- rx_desc = I40E_RX_DESC(rx_ring, i);
+ rx_desc = I40E_RX_DESC(rx_ring, rx_ring->next_to_clean);
+
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+ rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
+ I40E_RXD_QW1_PTYPE_SHIFT;
rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
- I40E_RXD_QW1_STATUS_SHIFT;
+ I40E_RXD_QW1_STATUS_SHIFT;
if (!(rx_status & BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
break;
+ /* status_error_len will always be zero for unused descriptors
+ * because it's cleared in cleanup, and overlaps with hdr_addr
+ * which is always zero because packet split isn't used, if the
+ * hardware wrote DD then it will be non-zero
+ */
+ if (!rx_desc->wb.qword1.status_error_len)
+ break;
+
/* This memory barrier is needed to keep us from reading
* any other fields out of the rx_desc until we know the
* DD bit is set.
*/
dma_rmb();
- rx_bi = &rx_ring->rx_bi[i];
- skb = rx_bi->skb;
- prefetch(skb->data);
-
- rx_packet_len = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
- I40E_RXD_QW1_LENGTH_PBUF_SHIFT;
-
- rx_error = (qword & I40E_RXD_QW1_ERROR_MASK) >>
- I40E_RXD_QW1_ERROR_SHIFT;
- rx_error &= ~BIT(I40E_RX_DESC_ERROR_HBO_SHIFT);
+ skb = i40evf_fetch_rx_buffer(rx_ring, rx_desc);
+ if (!skb)
+ break;
- rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
- I40E_RXD_QW1_PTYPE_SHIFT;
- rx_bi->skb = NULL;
cleaned_count++;
- /* Get the header and possibly the whole packet
- * If this is an skb from previous receive dma will be 0
- */
- skb_put(skb, rx_packet_len);
- dma_unmap_single(rx_ring->dev, rx_bi->dma, rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- rx_bi->dma = 0;
-
- I40E_RX_INCREMENT(rx_ring, i);
-
- if (unlikely(
- !(rx_status & BIT(I40E_RX_DESC_STATUS_EOF_SHIFT)))) {
- rx_ring->rx_stats.non_eop_descs++;
+ if (i40e_is_non_eop(rx_ring, rx_desc, skb))
continue;
- }
- /* ERR_MASK will only have valid bits if EOP set */
- if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
+ /* ERR_MASK will only have valid bits if EOP set, and
+ * what we are doing here is actually checking
+ * I40E_RX_DESC_ERROR_RXE_SHIFT, since it is the zeroth bit in
+ * the error field
+ */
+ if (unlikely(i40e_test_staterr(rx_desc, BIT(I40E_RXD_QW1_ERROR_SHIFT)))) {
dev_kfree_skb_any(skb);
continue;
}
- i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
+ if (i40e_cleanup_headers(rx_ring, skb))
+ continue;
+
/* probably a little skewed due to removing CRC */
total_rx_bytes += skb->len;
- total_rx_packets++;
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ /* populate checksum, VLAN, and protocol */
+ i40evf_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
+
- i40e_rx_checksum(vsi, skb, rx_status, rx_error, rx_ptype);
+ vlan_tag = (qword & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) ?
+ le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1) : 0;
- vlan_tag = rx_status & BIT(I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
- ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
- : 0;
i40e_receive_skb(rx_ring, skb, vlan_tag);
- rx_desc->wb.qword1.status_error_len = 0;
- } while (likely(total_rx_packets < budget));
+ /* update budget accounting */
+ total_rx_packets++;
+ }
u64_stats_update_begin(&rx_ring->syncp);
rx_ring->stats.packets += total_rx_packets;
@@ -1288,6 +1292,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
rx_ring->q_vector->rx.total_packets += total_rx_packets;
rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
+ /* guarantee a trip back through this routine if there was a failure */
return failure ? budget : total_rx_packets;
}
@@ -1411,9 +1416,11 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
i40e_for_each_ring(ring, q_vector->tx) {
- clean_complete = clean_complete &&
- i40e_clean_tx_irq(ring, vsi->work_limit);
- arm_wb = arm_wb || ring->arm_wb;
+ if (!i40e_clean_tx_irq(vsi, ring, budget)) {
+ clean_complete = false;
+ continue;
+ }
+ arm_wb |= ring->arm_wb;
ring->arm_wb = false;
}
@@ -1427,16 +1434,12 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
budget_per_ring = max(budget/q_vector->num_ringpairs, 1);
i40e_for_each_ring(ring, q_vector->rx) {
- int cleaned;
-
- if (ring_is_ps_enabled(ring))
- cleaned = i40e_clean_rx_irq_ps(ring, budget_per_ring);
- else
- cleaned = i40e_clean_rx_irq_1buf(ring, budget_per_ring);
+ int cleaned = i40e_clean_rx_irq(ring, budget_per_ring);
work_done += cleaned;
- /* if we didn't clean as many as budgeted, we must be done */
- clean_complete = clean_complete && (budget_per_ring > cleaned);
+ /* if we clean as many as budgeted, we must not be done */
+ if (cleaned >= budget_per_ring)
+ clean_complete = false;
}
/* If work not completed, return budget and polling will return */
@@ -1514,15 +1517,13 @@ out:
/**
* i40e_tso - set up the tso context descriptor
- * @tx_ring: ptr to the ring to send
* @skb: ptr to the skb we're sending
* @hdr_len: ptr to the size of the packet header
* @cd_type_cmd_tso_mss: Quad Word 1
*
* Returns 0 if no TSO can happen, 1 if tso is going, or error
**/
-static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
- u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
+static int i40e_tso(struct sk_buff *skb, u8 *hdr_len, u64 *cd_type_cmd_tso_mss)
{
u64 cd_cmd, cd_tso_len, cd_mss;
union {
@@ -1559,16 +1560,22 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
ip.v6->payload_len = 0;
}
- if (skb_shinfo(skb)->gso_type & (SKB_GSO_UDP_TUNNEL | SKB_GSO_GRE |
+ if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
+ SKB_GSO_GRE_CSUM |
+ SKB_GSO_IPXIP4 |
+ SKB_GSO_IPXIP6 |
+ SKB_GSO_UDP_TUNNEL |
SKB_GSO_UDP_TUNNEL_CSUM)) {
- if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
+ if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
+ (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) {
+ l4.udp->len = 0;
+
/* determine offset of outer transport header */
l4_offset = l4.hdr - skb->data;
/* remove payload length from outer checksum */
- paylen = (__force u16)l4.udp->check;
- paylen += ntohs(1) * (u16)~(skb->len - l4_offset);
- l4.udp->check = ~csum_fold((__force __wsum)paylen);
+ paylen = skb->len - l4_offset;
+ csum_replace_by_diff(&l4.udp->check, htonl(paylen));
}
/* reset pointers to inner headers */
@@ -1588,9 +1595,8 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
l4_offset = l4.hdr - skb->data;
/* remove payload length from inner checksum */
- paylen = (__force u16)l4.tcp->check;
- paylen += ntohs(1) * (u16)~(skb->len - l4_offset);
- l4.tcp->check = ~csum_fold((__force __wsum)paylen);
+ paylen = skb->len - l4_offset;
+ csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
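+
+ /* csum_replace_by_diff() strips the payload length from the
+ * pseudo-header checksum; the hardware adds each segment's own
+ * length back while performing TSO
+ */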
/* compute length of segmentation header */
*hdr_len = (l4.tcp->doff * 4) + l4_offset;
@@ -1630,7 +1636,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
unsigned char *hdr;
} l4;
unsigned char *exthdr;
- u32 offset, cmd = 0, tunnel = 0;
+ u32 offset, cmd = 0;
__be16 frag_off;
u8 l4_proto = 0;
@@ -1644,6 +1650,7 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
if (skb->encapsulation) {
+ u32 tunnel = 0;
/* define outer network header type */
if (*tx_flags & I40E_TX_FLAGS_IPV4) {
tunnel |= (*tx_flags & I40E_TX_FLAGS_TSO) ?
@@ -1661,13 +1668,6 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
&l4_proto, &frag_off);
}
- /* compute outer L3 header size */
- tunnel |= ((l4.hdr - ip.hdr) / 4) <<
- I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
-
- /* switch IP header pointer from outer to inner header */
- ip.hdr = skb_inner_network_header(skb);
-
/* define outer transport */
switch (l4_proto) {
case IPPROTO_UDP:
@@ -1678,6 +1678,11 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
tunnel |= I40E_TXD_CTX_GRE_TUNNELING;
*tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
break;
+ case IPPROTO_IPIP:
+ case IPPROTO_IPV6:
+ *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
+ l4.hdr = skb_inner_network_header(skb);
+ break;
default:
if (*tx_flags & I40E_TX_FLAGS_TSO)
return -1;
@@ -1686,12 +1691,20 @@ static int i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
return 0;
}
+ /* compute outer L3 header size */
+ tunnel |= ((l4.hdr - ip.hdr) / 4) <<
+ I40E_TXD_CTX_QW0_EXT_IPLEN_SHIFT;
+
+ /* switch IP header pointer from outer to inner header */
+ ip.hdr = skb_inner_network_header(skb);
+
/* compute tunnel header size */
tunnel |= ((ip.hdr - l4.hdr) / 2) <<
I40E_TXD_CTX_QW0_NATLEN_SHIFT;
/* indicate if we need to offload outer UDP header */
if ((*tx_flags & I40E_TX_FLAGS_TSO) &&
+ !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
tunnel |= I40E_TXD_CTX_QW0_L4T_CS_MASK;
@@ -1935,6 +1948,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_bi = first;
for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
+ unsigned int max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
+
if (dma_mapping_error(tx_ring->dev, dma))
goto dma_error;
@@ -1942,12 +1957,14 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
dma_unmap_len_set(tx_bi, len, size);
dma_unmap_addr_set(tx_bi, dma, dma);
+ /* align size to end of page */
+ max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1);
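+ /* e.g. if dma ends in 0xf00, -dma & 0xfff adds 0x100, so the
+ * first chunk stops on a 4K boundary and later chunks become
+ * naturally aligned 4K read requests
+ */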
tx_desc->buffer_addr = cpu_to_le64(dma);
while (unlikely(size > I40E_MAX_DATA_PER_TXD)) {
tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, td_offset,
- I40E_MAX_DATA_PER_TXD, td_tag);
+ max_data, td_tag);
tx_desc++;
i++;
@@ -1958,9 +1975,10 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
i = 0;
}
- dma += I40E_MAX_DATA_PER_TXD;
- size -= I40E_MAX_DATA_PER_TXD;
+ dma += max_data;
+ size -= max_data;
+ max_data = I40E_MAX_DATA_PER_TXD_ALIGNED;
tx_desc->buffer_addr = cpu_to_le64(dma);
}
@@ -2109,7 +2127,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
if (i40e_chk_linearize(skb, count)) {
if (__skb_linearize(skb))
goto out_drop;
- count = TXD_USE_COUNT(skb->len);
+ count = i40e_txd_use_count(skb->len);
tx_ring->tx_stats.tx_linearize++;
}
@@ -2140,7 +2158,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
else if (protocol == htons(ETH_P_IPV6))
tx_flags |= I40E_TX_FLAGS_IPV6;
- tso = i40e_tso(tx_ring, skb, &hdr_len, &cd_type_cmd_tso_mss);
+ tso = i40e_tso(skb, &hdr_len, &cd_type_cmd_tso_mss);
if (tso < 0)
goto out_drop;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 0429553fe887..0112277e5882 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -102,8 +102,8 @@ enum i40e_dyn_idx_t {
(((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)
-/* Supported Rx Buffer Sizes */
-#define I40E_RXBUFFER_512 512 /* Used for packet split */
+/* Supported Rx Buffer Sizes (a multiple of 128) */
+#define I40E_RXBUFFER_256 256
#define I40E_RXBUFFER_2048 2048
#define I40E_RXBUFFER_3072 3072 /* For FCoE MTU of 2158 */
#define I40E_RXBUFFER_4096 4096
@@ -114,9 +114,28 @@ enum i40e_dyn_idx_t {
* reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
* this adds up to 512 bytes of extra data meaning the smallest allocation
* we could have is 1K.
- * i.e. RXBUFFER_512 --> size-1024 slab
+ * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
+ * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
*/
-#define I40E_RX_HDR_SIZE I40E_RXBUFFER_512
+#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
+#define i40e_rx_desc i40e_32byte_rx_desc
+
+/**
+ * i40e_test_staterr - tests bits in Rx descriptor status and error fields
+ * @rx_desc: pointer to receive descriptor (in le64 format)
+ * @stat_err_bits: value to mask
+ *
+ * This function does some fast chicanery in order to return the
+ * value of the mask which is really only used for boolean tests.
+ * The status_error_len doesn't need to be shifted because it begins
+ * at offset zero.
+ */
+static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
+ const u64 stat_err_bits)
+{
+ return !!(rx_desc->wb.qword1.status_error_len &
+ cpu_to_le64(stat_err_bits));
+}
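A typical call site, sketched under the assumption that I40E_RX_DESC_STATUS_DD_SHIFT from i40e_type.h is the bit of interest, stops the clean-up loop until hardware has written back the Descriptor Done bit:

    if (!i40e_test_staterr(rx_desc, BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
            break;  /* descriptor not yet written back by hardware */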
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define I40E_RX_BUFFER_WRITE 16 /* Must be power of 2 */
@@ -142,14 +161,41 @@ enum i40e_dyn_idx_t {
prefetch((n)); \
} while (0)
-#define i40e_rx_desc i40e_32byte_rx_desc
-
#define I40E_MAX_BUFFER_TXD 8
#define I40E_MIN_TX_LEN 17
-#define I40E_MAX_DATA_PER_TXD 8192
+
+/* The size limit for a transmit buffer in a descriptor is (16K - 1).
+ * In order to align with the read requests we will align the value to
+ * the nearest 4K which represents our maximum read request size.
+ */
+#define I40E_MAX_READ_REQ_SIZE 4096
+#define I40E_MAX_DATA_PER_TXD (16 * 1024 - 1)
+#define I40E_MAX_DATA_PER_TXD_ALIGNED \
+ (I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
+
+/* This ugly bit of math is equivalent to DIV_ROUND_UP(size, X) where X is
+ * the value I40E_MAX_DATA_PER_TXD_ALIGNED. It is needed because 12K is
+ * not a power of 2 and division is expensive. It is used to approximate
+ * the number of descriptors used per linear buffer. Note that this will
+ * overestimate in some cases, as it doesn't account for the fact that we
+ * will add up to 4K - 1 in aligning the 12K buffer; however, the error
+ * should not impact things much, as large buffers usually mean we will
+ * use fewer descriptors than there are frags in an skb.
+ */
+static inline unsigned int i40e_txd_use_count(unsigned int size)
+{
+ const unsigned int max = I40E_MAX_DATA_PER_TXD_ALIGNED;
+ const unsigned int reciprocal = ((1ull << 32) - 1 + (max / 2)) / max;
+ unsigned int adjust = ~(u32)0;
+
+ /* if we rounded up on the reciprocal pull down the adjustment */
+ if ((max * reciprocal) > adjust)
+ adjust = ~(u32)(reciprocal - 1);
+
+ return (u32)((((u64)size * reciprocal) + adjust) >> 32);
+}
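A stand-alone sanity check (hypothetical user-space harness, not kernel code) confirms the reciprocal multiply agrees with plain round-up division over a wide range of sizes:

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_ALIGNED ((16 * 1024 - 1) & ~(4096 - 1))   /* 12288 */

    static unsigned int txd_use_count(unsigned int size)
    {
            const unsigned int max = MAX_ALIGNED;
            const unsigned int reciprocal =
                    ((1ull << 32) - 1 + (max / 2)) / max;
            unsigned int adjust = ~(uint32_t)0;

            /* if we rounded up on the reciprocal pull down the adjustment */
            if (((uint64_t)max * reciprocal) > adjust)
                    adjust = ~(uint32_t)(reciprocal - 1);
            return (uint32_t)((((uint64_t)size * reciprocal) + adjust) >> 32);
    }

    int main(void)
    {
            unsigned int size;

            for (size = 0; size < 8 * MAX_ALIGNED; size++)
                    if (txd_use_count(size) !=
                        (size + MAX_ALIGNED - 1) / MAX_ALIGNED)
                            printf("mismatch at %u\n", size);
            return 0;       /* silence means the approximation holds */
    }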
/* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define I40E_MIN_DESC_PENDING 4
@@ -183,10 +229,8 @@ struct i40e_tx_buffer {
struct i40e_rx_buffer {
struct sk_buff *skb;
- void *hdr_buf;
dma_addr_t dma;
struct page *page;
- dma_addr_t page_dma;
unsigned int page_offset;
};
@@ -215,22 +259,18 @@ struct i40e_rx_queue_stats {
enum i40e_ring_state_t {
__I40E_TX_FDIR_INIT_DONE,
__I40E_TX_XPS_INIT_DONE,
- __I40E_RX_PS_ENABLED,
- __I40E_RX_16BYTE_DESC_ENABLED,
};
-#define ring_is_ps_enabled(ring) \
- test_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define set_ring_ps_enabled(ring) \
- set_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define clear_ring_ps_enabled(ring) \
- clear_bit(__I40E_RX_PS_ENABLED, &(ring)->state)
-#define ring_is_16byte_desc_enabled(ring) \
- test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
-#define set_ring_16byte_desc_enabled(ring) \
- set_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
-#define clear_ring_16byte_desc_enabled(ring) \
- clear_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
+/* some useful defines for virtchannel interface, which
+ * is the only remaining user of header split
+ */
+#define I40E_RX_DTYPE_NO_SPLIT 0
+#define I40E_RX_DTYPE_HEADER_SPLIT 1
+#define I40E_RX_DTYPE_SPLIT_ALWAYS 2
+#define I40E_RX_SPLIT_L2 0x1
+#define I40E_RX_SPLIT_IP 0x2
+#define I40E_RX_SPLIT_TCP_UDP 0x4
+#define I40E_RX_SPLIT_SCTP 0x8
/* struct that defines a descriptor ring, associated with a VSI */
struct i40e_ring {
@@ -249,16 +289,7 @@ struct i40e_ring {
u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */
- u16 rx_hdr_len;
u16 rx_buf_len;
- u8 dtype;
-#define I40E_RX_DTYPE_NO_SPLIT 0
-#define I40E_RX_DTYPE_HEADER_SPLIT 1
-#define I40E_RX_DTYPE_SPLIT_ALWAYS 2
-#define I40E_RX_SPLIT_L2 0x1
-#define I40E_RX_SPLIT_IP 0x2
-#define I40E_RX_SPLIT_TCP_UDP 0x4
-#define I40E_RX_SPLIT_SCTP 0x8
/* used in interrupt processing */
u16 next_to_use;
@@ -290,6 +321,7 @@ struct i40e_ring {
struct i40e_q_vector *q_vector; /* Backreference to associated vector */
struct rcu_head rcu; /* to avoid race on free */
+ u16 next_to_alloc;
} ____cacheline_internodealigned_in_smp;
enum i40e_latency_range {
@@ -313,9 +345,7 @@ struct i40e_ring_container {
#define i40e_for_each_ring(pos, head) \
for (pos = (head).ring; pos != NULL; pos = pos->next)
-bool i40evf_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count);
-bool i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count);
-void i40evf_alloc_rx_headers(struct i40e_ring *rxr);
+bool i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40evf_clean_tx_ring(struct i40e_ring *tx_ring);
void i40evf_clean_rx_ring(struct i40e_ring *rx_ring);
@@ -359,7 +389,7 @@ static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
int count = 0, size = skb_headlen(skb);
for (;;) {
- count += TXD_USE_COUNT(size);
+ count += i40e_txd_use_count(size);
if (!nr_frags--)
break;
@@ -405,4 +435,14 @@ static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
/* we can support up to 8 data buffers for a single send */
return count != I40E_MAX_BUFFER_TXD;
}
+
+/**
+ * i40e_rx_is_fcoe - returns true if the Rx packet type is FCoE
+ * @ptype: the packet type field from Rx descriptor write-back
+ **/
+static inline bool i40e_rx_is_fcoe(u16 ptype)
+{
+ return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
+ (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
+}
#endif /* _I40E_TXRX_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 301fe2b6dd03..97f96e0d9c4c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -36,7 +36,7 @@
#include "i40e_devids.h"
/* I40E_MASK is a macro used on 32 bit registers */
-#define I40E_MASK(mask, shift) (mask << shift)
+#define I40E_MASK(mask, shift) ((u32)(mask) << (shift))
#define I40E_MAX_VSI_QP 16
#define I40E_MAX_VF_VSI 3
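The added casts are not cosmetic: with the old macro a mask written as a plain int could be shifted into bit 31, which is signed-overflow undefined behaviour, and assigning the resulting negative int to a 64-bit variable sign-extends it. A minimal illustration (not from the patch):

    /* old: (mask << shift)          -- 1 << 31 overflows signed int (UB) */
    /* new: ((u32)(mask) << (shift)) -- unsigned shift, well defined      */
    u32 reg = I40E_MASK(0x1, 31);    /* yields 0x80000000 safely */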
@@ -258,6 +258,11 @@ struct i40e_hw_capabilities {
#define I40E_FLEX10_STATUS_DCC_ERROR 0x1
#define I40E_FLEX10_STATUS_VC_MODE 0x2
+ bool sec_rev_disabled;
+ bool update_disabled;
+#define I40E_NVM_MGMT_SEC_REV_DISABLED 0x1
+#define I40E_NVM_MGMT_UPDATE_DISABLED 0x2
+
bool mgmt_cem;
bool ieee_1588;
bool iwarp;
@@ -522,6 +527,8 @@ struct i40e_hw {
enum i40e_nvmupd_state nvmupd_state;
struct i40e_aq_desc nvm_wb_desc;
struct i40e_virt_mem nvm_buff;
+ bool nvm_release_on_done;
+ u16 nvm_wait_opcode;
/* HMC info */
struct i40e_hmc_info hmc; /* HMC info struct */
@@ -1329,4 +1336,46 @@ enum i40e_reset_type {
/* RSS Hash Table Size */
#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000
+
+/* INPUT SET MASK for RSS, flow director and flexible payload */
+#define I40E_FD_INSET_L3_SRC_SHIFT 47
+#define I40E_FD_INSET_L3_SRC_WORD_MASK (0x3ULL << \
+ I40E_FD_INSET_L3_SRC_SHIFT)
+#define I40E_FD_INSET_L3_DST_SHIFT 35
+#define I40E_FD_INSET_L3_DST_WORD_MASK (0x3ULL << \
+ I40E_FD_INSET_L3_DST_SHIFT)
+#define I40E_FD_INSET_L4_SRC_SHIFT 34
+#define I40E_FD_INSET_L4_SRC_WORD_MASK (0x1ULL << \
+ I40E_FD_INSET_L4_SRC_SHIFT)
+#define I40E_FD_INSET_L4_DST_SHIFT 33
+#define I40E_FD_INSET_L4_DST_WORD_MASK (0x1ULL << \
+ I40E_FD_INSET_L4_DST_SHIFT)
+#define I40E_FD_INSET_VERIFY_TAG_SHIFT 31
+#define I40E_FD_INSET_VERIFY_TAG_WORD_MASK (0x3ULL << \
+ I40E_FD_INSET_VERIFY_TAG_SHIFT)
+
+#define I40E_FD_INSET_FLEX_WORD50_SHIFT 17
+#define I40E_FD_INSET_FLEX_WORD50_MASK (0x1ULL << \
+ I40E_FD_INSET_FLEX_WORD50_SHIFT)
+#define I40E_FD_INSET_FLEX_WORD51_SHIFT 16
+#define I40E_FD_INSET_FLEX_WORD51_MASK (0x1ULL << \
+ I40E_FD_INSET_FLEX_WORD51_SHIFT)
+#define I40E_FD_INSET_FLEX_WORD52_SHIFT 15
+#define I40E_FD_INSET_FLEX_WORD52_MASK (0x1ULL << \
+ I40E_FD_INSET_FLEX_WORD52_SHIFT)
+#define I40E_FD_INSET_FLEX_WORD53_SHIFT 14
+#define I40E_FD_INSET_FLEX_WORD53_MASK (0x1ULL << \
+ I40E_FD_INSET_FLEX_WORD53_SHIFT)
+#define I40E_FD_INSET_FLEX_WORD54_SHIFT 13
+#define I40E_FD_INSET_FLEX_WORD54_MASK (0x1ULL << \
+ I40E_FD_INSET_FLEX_WORD54_SHIFT)
+#define I40E_FD_INSET_FLEX_WORD55_SHIFT 12
+#define I40E_FD_INSET_FLEX_WORD55_MASK (0x1ULL << \
+ I40E_FD_INSET_FLEX_WORD55_SHIFT)
+#define I40E_FD_INSET_FLEX_WORD56_SHIFT 11
+#define I40E_FD_INSET_FLEX_WORD56_MASK (0x1ULL << \
+ I40E_FD_INSET_FLEX_WORD56_SHIFT)
+#define I40E_FD_INSET_FLEX_WORD57_SHIFT 10
+#define I40E_FD_INSET_FLEX_WORD57_MASK (0x1ULL << \
+ I40E_FD_INSET_FLEX_WORD57_SHIFT)
#endif /* _I40E_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
index 3b9d2037456c..f04ce6cb70dc 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -80,7 +80,12 @@ enum i40e_virtchnl_ops {
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
I40E_VIRTCHNL_OP_GET_STATS = 15,
I40E_VIRTCHNL_OP_FCOE = 16,
- I40E_VIRTCHNL_OP_EVENT = 17,
+ I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+ I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
+ I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
+ I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
+ I40E_VIRTCHNL_OP_SET_RSS_HENA = 26,
+
};
/* Virtual channel message descriptor. This overlays the admin queue
@@ -154,6 +159,7 @@ struct i40e_virtchnl_vsi_resource {
#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0x00080000
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
@@ -162,8 +168,8 @@ struct i40e_virtchnl_vf_resource {
u16 max_mtu;
u32 vf_offload_flags;
- u32 max_fcoe_contexts;
- u32 max_fcoe_filters;
+ u32 rss_key_size;
+ u32 rss_lut_size;
struct i40e_virtchnl_vsi_resource vsi_res[1];
};
@@ -322,6 +328,39 @@ struct i40e_virtchnl_promisc_info {
* PF replies with struct i40e_eth_stats in an external buffer.
*/
+/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY
+ * I40E_VIRTCHNL_OP_CONFIG_RSS_LUT
+ * VF sends these messages to configure RSS. Only supported if both PF
+ * and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
+ * configuration negotiation. If this is the case, then the RSS fields in
+ * the VF resource struct are valid.
+ * Both the key and LUT are initialized to 0 by the PF, meaning that
+ * RSS is effectively disabled until set up by the VF.
+ */
+struct i40e_virtchnl_rss_key {
+ u16 vsi_id;
+ u16 key_len;
+ u8 key[1]; /* RSS hash key, packed bytes */
+};
+
+struct i40e_virtchnl_rss_lut {
+ u16 vsi_id;
+ u16 lut_entries;
+ u8 lut[1]; /* RSS lookup table */
+};
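Both structs end in a one-element array, the old-style flexible-array idiom, so a sender allocates sizeof(struct) plus the payload length minus the single element already counted. A sketch mirroring what i40evf_set_rss_key() does later in this patch (vsi_id, key and key_len are assumed locals):

    int len = sizeof(struct i40e_virtchnl_rss_key) + key_len - 1;
    struct i40e_virtchnl_rss_key *vrk = kzalloc(len, GFP_KERNEL);

    if (!vrk)
            return;
    vrk->vsi_id = vsi_id;
    vrk->key_len = key_len;
    memcpy(vrk->key, key, key_len); /* payload lands in the trailing array */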
+
+/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS
+ * I40E_VIRTCHNL_OP_SET_RSS_HENA
+ * VF sends these messages to get and set the hash filter enable bits for RSS.
+ * By default, the PF sets these to all possible traffic types that the
+ * hardware supports. The VF can query this value if it wants to change the
+ * traffic types that are hashed by the hardware.
+ * Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h
+ */
+struct i40e_virtchnl_rss_hena {
+ u64 hena;
+};
+
/* I40E_VIRTCHNL_OP_EVENT
* PF sends this message to inform the VF driver of events that may affect it.
* No direct response is expected from the VF, though it may generate other
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index e657eccd232c..76ed97db28e2 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -67,8 +67,6 @@ struct i40e_vsi {
u16 rx_itr_setting;
u16 tx_itr_setting;
u16 qs_handle;
- u8 *rss_hkey_user; /* User configured hash keys */
- u8 *rss_lut_user; /* User configured lookup table entries */
};
/* How many Rx Buffers do we bundle into one write to the hardware ? */
@@ -82,9 +80,6 @@ struct i40e_vsi {
#define I40EVF_REQ_DESCRIPTOR_MULTIPLE 32
/* Supported Rx Buffer Sizes */
-#define I40EVF_RXBUFFER_64 64 /* Used for packet split */
-#define I40EVF_RXBUFFER_128 128 /* Used for packet split */
-#define I40EVF_RXBUFFER_256 256 /* Used for packet split */
#define I40EVF_RXBUFFER_2048 2048
#define I40EVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
#define I40EVF_MAX_AQ_BUF_SIZE 4096
@@ -210,9 +205,6 @@ struct i40evf_adapter {
u32 flags;
#define I40EVF_FLAG_RX_CSUM_ENABLED BIT(0)
-#define I40EVF_FLAG_RX_1BUF_CAPABLE BIT(1)
-#define I40EVF_FLAG_RX_PS_CAPABLE BIT(2)
-#define I40EVF_FLAG_RX_PS_ENABLED BIT(3)
#define I40EVF_FLAG_IMIR_ENABLED BIT(5)
#define I40EVF_FLAG_MQ_CAPABLE BIT(6)
#define I40EVF_FLAG_NEED_LINK_UPDATE BIT(7)
@@ -222,6 +214,8 @@ struct i40evf_adapter {
#define I40EVF_FLAG_WB_ON_ITR_CAPABLE BIT(11)
#define I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE BIT(12)
#define I40EVF_FLAG_ADDR_SET_BY_PF BIT(13)
+#define I40EVF_FLAG_PROMISC_ON BIT(15)
+#define I40EVF_FLAG_ALLMULTI_ON BIT(16)
/* duplicates for common code */
#define I40E_FLAG_FDIR_ATR_ENABLED 0
#define I40E_FLAG_DCB_ENABLED 0
@@ -239,8 +233,17 @@ struct i40evf_adapter {
#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES BIT(6)
#define I40EVF_FLAG_AQ_MAP_VECTORS BIT(7)
#define I40EVF_FLAG_AQ_HANDLE_RESET BIT(8)
-#define I40EVF_FLAG_AQ_CONFIGURE_RSS BIT(9)
+#define I40EVF_FLAG_AQ_CONFIGURE_RSS BIT(9) /* direct AQ config */
#define I40EVF_FLAG_AQ_GET_CONFIG BIT(10)
+/* Newer style, RSS done by the PF so we can ignore hardware vagaries. */
+#define I40EVF_FLAG_AQ_GET_HENA BIT(11)
+#define I40EVF_FLAG_AQ_SET_HENA BIT(12)
+#define I40EVF_FLAG_AQ_SET_RSS_KEY BIT(13)
+#define I40EVF_FLAG_AQ_SET_RSS_LUT BIT(14)
+#define I40EVF_FLAG_AQ_REQUEST_PROMISC BIT(15)
+#define I40EVF_FLAG_AQ_RELEASE_PROMISC BIT(16)
+#define I40EVF_FLAG_AQ_REQUEST_ALLMULTI BIT(17)
+#define I40EVF_FLAG_AQ_RELEASE_ALLMULTI BIT(18)
/* OS defined structs */
struct net_device *netdev;
@@ -256,10 +259,18 @@ struct i40evf_adapter {
bool netdev_registered;
bool link_up;
enum i40e_virtchnl_ops current_op;
-#define CLIENT_ENABLED(_a) ((_a)->vf_res->vf_offload_flags & \
- I40E_VIRTCHNL_VF_OFFLOAD_IWARP)
+#define CLIENT_ENABLED(_a) ((_a)->vf_res ? \
+ (_a)->vf_res->vf_offload_flags & \
+ I40E_VIRTCHNL_VF_OFFLOAD_IWARP : \
+ 0)
+/* RSS by the PF should be preferred over RSS via other methods. */
+#define RSS_PF(_a) ((_a)->vf_res->vf_offload_flags & \
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)
#define RSS_AQ(_a) ((_a)->vf_res->vf_offload_flags & \
I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ)
+#define RSS_REG(_a) (!((_a)->vf_res->vf_offload_flags & \
+ (I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ | \
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF)))
#define VLAN_ALLOWED(_a) ((_a)->vf_res->vf_offload_flags & \
I40E_VIRTCHNL_VF_OFFLOAD_VLAN)
struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */
@@ -271,11 +282,16 @@ struct i40evf_adapter {
struct i40e_eth_stats current_stats;
struct i40e_vsi vsi;
u32 aq_wait_count;
+ /* RSS stuff */
+ u64 hena;
+ u16 rss_key_size;
+ u16 rss_lut_size;
+ u8 *rss_key;
+ u8 *rss_lut;
};
/* Ethtool Private Flags */
-#define I40EVF_PRIV_FLAGS_PS BIT(0)
/* needed by i40evf_ethtool.c */
extern char i40evf_driver_name[];
@@ -314,11 +330,12 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter);
void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags);
void i40evf_request_stats(struct i40evf_adapter *adapter);
void i40evf_request_reset(struct i40evf_adapter *adapter);
+void i40evf_get_hena(struct i40evf_adapter *adapter);
+void i40evf_set_hena(struct i40evf_adapter *adapter);
+void i40evf_set_rss_key(struct i40evf_adapter *adapter);
+void i40evf_set_rss_lut(struct i40evf_adapter *adapter);
void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
enum i40e_virtchnl_ops v_opcode,
i40e_status v_retval, u8 *msg, u16 msglen);
-int i40evf_config_rss(struct i40e_vsi *vsi, const u8 *seed, u8 *lut,
- u16 lut_size);
-int i40evf_get_rss(struct i40e_vsi *vsi, const u8 *seed, u8 *lut,
- u16 lut_size);
+int i40evf_config_rss(struct i40evf_adapter *adapter);
#endif /* _I40EVF_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index dd4430aae7fa..c9c202f6c521 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -63,12 +63,6 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = {
#define I40EVF_STATS_LEN(_dev) \
(I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev))
-static const char i40evf_priv_flags_strings[][ETH_GSTRING_LEN] = {
- "packet-split",
-};
-
-#define I40EVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40evf_priv_flags_strings)
-
/**
* i40evf_get_settings - Get Link Speed and Duplex settings
* @netdev: network interface device structure
@@ -103,8 +97,6 @@ static int i40evf_get_sset_count(struct net_device *netdev, int sset)
{
if (sset == ETH_SS_STATS)
return I40EVF_STATS_LEN(netdev);
- else if (sset == ETH_SS_PRIV_FLAGS)
- return I40EVF_PRIV_FLAGS_STR_LEN;
else
return -EINVAL;
}
@@ -170,12 +162,6 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i);
p += ETH_GSTRING_LEN;
}
- } else if (sset == ETH_SS_PRIV_FLAGS) {
- for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) {
- memcpy(data, i40evf_priv_flags_strings[i],
- ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
}
}
@@ -225,7 +211,6 @@ static void i40evf_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->version, i40evf_driver_version, 32);
strlcpy(drvinfo->fw_version, "N/A", 4);
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
- drvinfo->n_priv_flags = I40EVF_PRIV_FLAGS_STR_LEN;
}
/**
@@ -378,63 +363,6 @@ static int i40evf_set_coalesce(struct net_device *netdev,
}
/**
- * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
- * @adapter: board private structure
- * @cmd: ethtool rxnfc command
- *
- * Returns Success if the flow is supported, else Invalid Input.
- **/
-static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter,
- struct ethtool_rxnfc *cmd)
-{
- struct i40e_hw *hw = &adapter->hw;
- u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
- ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
-
- /* We always hash on IP src and dest addresses */
- cmd->data = RXH_IP_SRC | RXH_IP_DST;
-
- switch (cmd->flow_type) {
- case TCP_V4_FLOW:
- if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- break;
- case UDP_V4_FLOW:
- if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- break;
-
- case SCTP_V4_FLOW:
- case AH_ESP_V4_FLOW:
- case AH_V4_FLOW:
- case ESP_V4_FLOW:
- case IPV4_FLOW:
- break;
-
- case TCP_V6_FLOW:
- if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- break;
- case UDP_V6_FLOW:
- if (hena & BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
- cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
- break;
-
- case SCTP_V6_FLOW:
- case AH_ESP_V6_FLOW:
- case AH_V6_FLOW:
- case ESP_V6_FLOW:
- case IPV6_FLOW:
- break;
- default:
- cmd->data = 0;
- return -EINVAL;
- }
-
- return 0;
-}
-
-/**
* i40evf_get_rxnfc - command to get RX flow classification rules
* @netdev: network interface device structure
* @cmd: ethtool rxnfc command
@@ -454,145 +382,8 @@ static int i40evf_get_rxnfc(struct net_device *netdev,
ret = 0;
break;
case ETHTOOL_GRXFH:
- ret = i40evf_get_rss_hash_opts(adapter, cmd);
- break;
- default:
- break;
- }
-
- return ret;
-}
-
-/**
- * i40evf_set_rss_hash_opt - Enable/Disable flow types for RSS hash
- * @adapter: board private structure
- * @cmd: ethtool rxnfc command
- *
- * Returns Success if the flow input set is supported.
- **/
-static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
- struct ethtool_rxnfc *nfc)
-{
- struct i40e_hw *hw = &adapter->hw;
- u32 flags = adapter->vf_res->vf_offload_flags;
-
- u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
- ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
-
- /* RSS does not support anything other than hashing
- * to queues on src and dst IPs and ports
- */
- if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
- RXH_L4_B_0_1 | RXH_L4_B_2_3))
- return -EINVAL;
-
- /* We need at least the IP SRC and DEST fields for hashing */
- if (!(nfc->data & RXH_IP_SRC) ||
- !(nfc->data & RXH_IP_DST))
- return -EINVAL;
-
- switch (nfc->flow_type) {
- case TCP_V4_FLOW:
- if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK);
-
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
- } else {
- return -EINVAL;
- }
- break;
- case TCP_V6_FLOW:
- if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
-
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
- } else {
- return -EINVAL;
- }
- break;
- case UDP_V4_FLOW:
- if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP);
-
- hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
- } else {
- return -EINVAL;
- }
- break;
- case UDP_V6_FLOW:
- if (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
- if (flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
- hena |=
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP);
-
- hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
- } else {
- return -EINVAL;
- }
- break;
- case AH_ESP_V4_FLOW:
- case AH_V4_FLOW:
- case ESP_V4_FLOW:
- case SCTP_V4_FLOW:
- if ((nfc->data & RXH_L4_B_0_1) ||
- (nfc->data & RXH_L4_B_2_3))
- return -EINVAL;
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
- break;
- case AH_ESP_V6_FLOW:
- case AH_V6_FLOW:
- case ESP_V6_FLOW:
- case SCTP_V6_FLOW:
- if ((nfc->data & RXH_L4_B_0_1) ||
- (nfc->data & RXH_L4_B_2_3))
- return -EINVAL;
- hena |= BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
- break;
- case IPV4_FLOW:
- hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4));
- break;
- case IPV6_FLOW:
- hena |= (BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
- BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6));
- break;
- default:
- return -EINVAL;
- }
-
- wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
- wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
- i40e_flush(hw);
-
- return 0;
-}
-
-/**
- * i40evf_set_rxnfc - command to set RX flow classification rules
- * @netdev: network interface device structure
- * @cmd: ethtool rxnfc command
- *
- * Returns Success if the command is supported.
- **/
-static int i40evf_set_rxnfc(struct net_device *netdev,
- struct ethtool_rxnfc *cmd)
-{
- struct i40evf_adapter *adapter = netdev_priv(netdev);
- int ret = -EOPNOTSUPP;
-
- switch (cmd->cmd) {
- case ETHTOOL_SRXFH:
- ret = i40evf_set_rss_hash_opt(adapter, cmd);
+ netdev_info(netdev,
+ "RSS hash info is not available to vf, use pf.\n");
break;
default:
break;
@@ -600,7 +391,6 @@ static int i40evf_set_rxnfc(struct net_device *netdev,
return ret;
}
-
/**
* i40evf_get_channels: get the number of channels supported by the device
* @netdev: network interface device structure
@@ -624,6 +414,19 @@ static void i40evf_get_channels(struct net_device *netdev,
}
/**
+ * i40evf_get_rxfh_key_size - get the RSS hash key size
+ * @netdev: network interface device structure
+ *
+ * Returns the RSS hash key size.
+ **/
+static u32 i40evf_get_rxfh_key_size(struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->rss_key_size;
+}
+
+/**
* i40evf_get_rxfh_indir_size - get the rx flow hash indirection table size
* @netdev: network interface device structure
*
@@ -631,7 +434,9 @@ static void i40evf_get_channels(struct net_device *netdev,
**/
static u32 i40evf_get_rxfh_indir_size(struct net_device *netdev)
{
- return (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4;
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ return adapter->rss_lut_size;
}
/**
@@ -646,9 +451,6 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
u8 *hfunc)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
- struct i40e_vsi *vsi = &adapter->vsi;
- u8 *seed = NULL, *lut;
- int ret;
u16 i;
if (hfunc)
@@ -656,24 +458,13 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
if (!indir)
return 0;
- seed = key;
-
- lut = kzalloc(I40EVF_HLUT_ARRAY_SIZE, GFP_KERNEL);
- if (!lut)
- return -ENOMEM;
-
- ret = i40evf_get_rss(vsi, seed, lut, I40EVF_HLUT_ARRAY_SIZE);
- if (ret)
- goto out;
+ memcpy(key, adapter->rss_key, adapter->rss_key_size);
/* Each 32 bits pointed by 'indir' is stored with a lut entry */
- for (i = 0; i < I40EVF_HLUT_ARRAY_SIZE; i++)
- indir[i] = (u32)lut[i];
+ for (i = 0; i < adapter->rss_lut_size; i++)
+ indir[i] = (u32)adapter->rss_lut[i];
-out:
- kfree(lut);
-
- return ret;
+ return 0;
}
/**
@@ -689,8 +480,6 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
- struct i40e_vsi *vsi = &adapter->vsi;
- u8 *seed = NULL;
u16 i;
/* We do not allow change in unsupported parameters */
@@ -701,76 +490,14 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir,
return 0;
if (key) {
- if (!vsi->rss_hkey_user) {
- vsi->rss_hkey_user = kzalloc(I40EVF_HKEY_ARRAY_SIZE,
- GFP_KERNEL);
- if (!vsi->rss_hkey_user)
- return -ENOMEM;
- }
- memcpy(vsi->rss_hkey_user, key, I40EVF_HKEY_ARRAY_SIZE);
- seed = vsi->rss_hkey_user;
- }
- if (!vsi->rss_lut_user) {
- vsi->rss_lut_user = kzalloc(I40EVF_HLUT_ARRAY_SIZE,
- GFP_KERNEL);
- if (!vsi->rss_lut_user)
- return -ENOMEM;
+ memcpy(adapter->rss_key, key, adapter->rss_key_size);
}
/* Each 32 bits pointed by 'indir' is stored with a lut entry */
- for (i = 0; i < I40EVF_HLUT_ARRAY_SIZE; i++)
- vsi->rss_lut_user[i] = (u8)(indir[i]);
-
- return i40evf_config_rss(vsi, seed, vsi->rss_lut_user,
- I40EVF_HLUT_ARRAY_SIZE);
-}
-
-/**
- * i40evf_get_priv_flags - report device private flags
- * @dev: network interface device structure
- *
- * The get string set count and the string set should be matched for each
- * flag returned. Add new strings for each flag to the i40e_priv_flags_strings
- * array.
- *
- * Returns a u32 bitmap of flags.
- **/
-static u32 i40evf_get_priv_flags(struct net_device *dev)
-{
- struct i40evf_adapter *adapter = netdev_priv(dev);
- u32 ret_flags = 0;
-
- ret_flags |= adapter->flags & I40EVF_FLAG_RX_PS_ENABLED ?
- I40EVF_PRIV_FLAGS_PS : 0;
-
- return ret_flags;
-}
+ for (i = 0; i < adapter->rss_lut_size; i++)
+ adapter->rss_lut[i] = (u8)(indir[i]);
-/**
- * i40evf_set_priv_flags - set private flags
- * @dev: network interface device structure
- * @flags: bit flags to be set
- **/
-static int i40evf_set_priv_flags(struct net_device *dev, u32 flags)
-{
- struct i40evf_adapter *adapter = netdev_priv(dev);
- bool reset_required = false;
-
- if ((flags & I40EVF_PRIV_FLAGS_PS) &&
- !(adapter->flags & I40EVF_FLAG_RX_PS_ENABLED)) {
- adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED;
- reset_required = true;
- } else if (!(flags & I40EVF_PRIV_FLAGS_PS) &&
- (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED)) {
- adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED;
- reset_required = true;
- }
-
- /* if needed, issue reset to cause things to take effect */
- if (reset_required)
- i40evf_schedule_reset(adapter);
-
- return 0;
+ return i40evf_config_rss(adapter);
}
static const struct ethtool_ops i40evf_ethtool_ops = {
@@ -782,18 +509,16 @@ static const struct ethtool_ops i40evf_ethtool_ops = {
.get_strings = i40evf_get_strings,
.get_ethtool_stats = i40evf_get_ethtool_stats,
.get_sset_count = i40evf_get_sset_count,
- .get_priv_flags = i40evf_get_priv_flags,
- .set_priv_flags = i40evf_set_priv_flags,
.get_msglevel = i40evf_get_msglevel,
.set_msglevel = i40evf_set_msglevel,
.get_coalesce = i40evf_get_coalesce,
.set_coalesce = i40evf_set_coalesce,
.get_rxnfc = i40evf_get_rxnfc,
- .set_rxnfc = i40evf_set_rxnfc,
.get_rxfh_indir_size = i40evf_get_rxfh_indir_size,
.get_rxfh = i40evf_get_rxfh,
.set_rxfh = i40evf_set_rxfh,
.get_channels = i40evf_get_channels,
+ .get_rxfh_key_size = i40evf_get_rxfh_key_size,
};
/**
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 4b70aae2fa84..600fb9c4a7f0 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -37,8 +37,8 @@ static const char i40evf_driver_string[] =
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 1
-#define DRV_VERSION_MINOR 4
-#define DRV_VERSION_BUILD 15
+#define DRV_VERSION_MINOR 6
+#define DRV_VERSION_BUILD 11
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) \
@@ -57,7 +57,9 @@ static const char i40evf_copyright[] =
*/
static const struct pci_device_id i40evf_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_VF), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_VF_HV), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_X722_VF_HV), 0},
/* required last entry */
{0, }
};
@@ -641,28 +643,11 @@ static void i40evf_configure_tx(struct i40evf_adapter *adapter)
static void i40evf_configure_rx(struct i40evf_adapter *adapter)
{
struct i40e_hw *hw = &adapter->hw;
- struct net_device *netdev = adapter->netdev;
- int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
int i;
- int rx_buf_len;
-
-
- /* Set the RX buffer length according to the mode */
- if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED ||
- netdev->mtu <= ETH_DATA_LEN)
- rx_buf_len = I40EVF_RXBUFFER_2048;
- else
- rx_buf_len = ALIGN(max_frame, 1024);
for (i = 0; i < adapter->num_active_queues; i++) {
adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i);
- adapter->rx_rings[i].rx_buf_len = rx_buf_len;
- if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) {
- set_ring_ps_enabled(&adapter->rx_rings[i]);
- adapter->rx_rings[i].rx_hdr_len = I40E_RX_HDR_SIZE;
- } else {
- clear_ring_ps_enabled(&adapter->rx_rings[i]);
- }
+ adapter->rx_rings[i].rx_buf_len = I40EVF_RXBUFFER_2048;
}
}
@@ -842,7 +827,7 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
ether_addr_copy(f->macaddr, macaddr);
- list_add(&f->list, &adapter->mac_filter_list);
+ list_add_tail(&f->list, &adapter->mac_filter_list);
f->add = true;
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
}
@@ -943,6 +928,21 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
bottom_of_search_loop:
continue;
}
+
+ if (netdev->flags & IFF_PROMISC &&
+ !(adapter->flags & I40EVF_FLAG_PROMISC_ON))
+ adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_PROMISC;
+ else if (!(netdev->flags & IFF_PROMISC) &&
+ adapter->flags & I40EVF_FLAG_PROMISC_ON)
+ adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_PROMISC;
+
+ if (netdev->flags & IFF_ALLMULTI &&
+ !(adapter->flags & I40EVF_FLAG_ALLMULTI_ON))
+ adapter->aq_required |= I40EVF_FLAG_AQ_REQUEST_ALLMULTI;
+ else if (!(netdev->flags & IFF_ALLMULTI) &&
+ adapter->flags & I40EVF_FLAG_ALLMULTI_ON)
+ adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_ALLMULTI;
+
clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
}
@@ -999,14 +999,7 @@ static void i40evf_configure(struct i40evf_adapter *adapter)
for (i = 0; i < adapter->num_active_queues; i++) {
struct i40e_ring *ring = &adapter->rx_rings[i];
- if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) {
- i40evf_alloc_rx_headers(ring);
- i40evf_alloc_rx_buffers_ps(ring, ring->count);
- } else {
- i40evf_alloc_rx_buffers_1buf(ring, ring->count);
- }
- ring->next_to_use = ring->count - 1;
- writel(ring->next_to_use, ring->tail);
+ i40evf_alloc_rx_buffers(ring, I40E_DESC_UNUSED(ring));
}
}
@@ -1224,24 +1217,18 @@ out:
}
/**
- * i40e_config_rss_aq - Prepare for RSS using AQ commands
- * @vsi: vsi structure
- * @seed: RSS hash seed
- * @lut: Lookup table
- * @lut_size: Lookup table size
+ * i40e_config_rss_aq - Configure RSS keys and lut by using AQ commands
+ * @adapter: board private structure
*
* Return 0 on success, negative on failure
**/
-static int i40evf_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
- u8 *lut, u16 lut_size)
+static int i40evf_config_rss_aq(struct i40evf_adapter *adapter)
{
- struct i40evf_adapter *adapter = vsi->back;
+ struct i40e_aqc_get_set_rss_key_data *rss_key =
+ (struct i40e_aqc_get_set_rss_key_data *)adapter->rss_key;
struct i40e_hw *hw = &adapter->hw;
int ret = 0;
- if (!vsi->id)
- return -EINVAL;
-
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
@@ -1249,198 +1236,82 @@ static int i40evf_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
return -EBUSY;
}
- if (seed) {
- struct i40e_aqc_get_set_rss_key_data *rss_key =
- (struct i40e_aqc_get_set_rss_key_data *)seed;
- ret = i40evf_aq_set_rss_key(hw, vsi->id, rss_key);
- if (ret) {
- dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
- i40evf_stat_str(hw, ret),
- i40evf_aq_str(hw, hw->aq.asq_last_status));
- return ret;
- }
+ ret = i40evf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
+ if (ret) {
+ dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
+ i40evf_stat_str(hw, ret),
+ i40evf_aq_str(hw, hw->aq.asq_last_status));
+ return ret;
+
}
- if (lut) {
- ret = i40evf_aq_set_rss_lut(hw, vsi->id, false, lut, lut_size);
- if (ret) {
- dev_err(&adapter->pdev->dev,
- "Cannot set RSS lut, err %s aq_err %s\n",
- i40evf_stat_str(hw, ret),
- i40evf_aq_str(hw, hw->aq.asq_last_status));
- return ret;
- }
+ ret = i40evf_aq_set_rss_lut(hw, adapter->vsi.id, false,
+ adapter->rss_lut, adapter->rss_lut_size);
+ if (ret) {
+ dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
+ i40evf_stat_str(hw, ret),
+ i40evf_aq_str(hw, hw->aq.asq_last_status));
}
return ret;
+
}
/**
* i40evf_config_rss_reg - Configure RSS keys and lut by writing registers
- * @vsi: Pointer to vsi structure
- * @seed: RSS hash seed
- * @lut: Lookup table
- * @lut_size: Lookup table size
+ * @adapter: board private structure
*
* Returns 0 on success, negative on failure
**/
-static int i40evf_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
- const u8 *lut, u16 lut_size)
+static int i40evf_config_rss_reg(struct i40evf_adapter *adapter)
{
- struct i40evf_adapter *adapter = vsi->back;
struct i40e_hw *hw = &adapter->hw;
+ u32 *dw;
u16 i;
- if (seed) {
- u32 *seed_dw = (u32 *)seed;
-
- for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
- wr32(hw, I40E_VFQF_HKEY(i), seed_dw[i]);
- }
-
- if (lut) {
- u32 *lut_dw = (u32 *)lut;
+ dw = (u32 *)adapter->rss_key;
+ for (i = 0; i < adapter->rss_key_size / 4; i++)
+ wr32(hw, I40E_VFQF_HKEY(i), dw[i]);
- if (lut_size != I40EVF_HLUT_ARRAY_SIZE)
- return -EINVAL;
+ dw = (u32 *)adapter->rss_lut;
+ for (i = 0; i < adapter->rss_lut_size / 4; i++)
+ wr32(hw, I40E_VFQF_HLUT(i), dw[i]);
- for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
- wr32(hw, I40E_VFQF_HLUT(i), lut_dw[i]);
- }
i40e_flush(hw);
return 0;
}
/**
- * * i40evf_get_rss_aq - Get RSS keys and lut by using AQ commands
- * @vsi: Pointer to vsi structure
- * @seed: RSS hash seed
- * @lut: Lookup table
- * @lut_size: Lookup table size
- *
- * Return 0 on success, negative on failure
- **/
-static int i40evf_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
- u8 *lut, u16 lut_size)
-{
- struct i40evf_adapter *adapter = vsi->back;
- struct i40e_hw *hw = &adapter->hw;
- int ret = 0;
-
- if (seed) {
- ret = i40evf_aq_get_rss_key(hw, vsi->id,
- (struct i40e_aqc_get_set_rss_key_data *)seed);
- if (ret) {
- dev_err(&adapter->pdev->dev,
- "Cannot get RSS key, err %s aq_err %s\n",
- i40evf_stat_str(hw, ret),
- i40evf_aq_str(hw, hw->aq.asq_last_status));
- return ret;
- }
- }
-
- if (lut) {
- ret = i40evf_aq_get_rss_lut(hw, vsi->id, seed, lut, lut_size);
- if (ret) {
- dev_err(&adapter->pdev->dev,
- "Cannot get RSS lut, err %s aq_err %s\n",
- i40evf_stat_str(hw, ret),
- i40evf_aq_str(hw, hw->aq.asq_last_status));
- return ret;
- }
- }
-
- return ret;
-}
-
-/**
- * * i40evf_get_rss_reg - Get RSS keys and lut by reading registers
- * @vsi: Pointer to vsi structure
- * @seed: RSS hash seed
- * @lut: Lookup table
- * @lut_size: Lookup table size
- *
- * Returns 0 on success, negative on failure
- **/
-static int i40evf_get_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
- const u8 *lut, u16 lut_size)
-{
- struct i40evf_adapter *adapter = vsi->back;
- struct i40e_hw *hw = &adapter->hw;
- u16 i;
-
- if (seed) {
- u32 *seed_dw = (u32 *)seed;
-
- for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
- seed_dw[i] = rd32(hw, I40E_VFQF_HKEY(i));
- }
-
- if (lut) {
- u32 *lut_dw = (u32 *)lut;
-
- if (lut_size != I40EVF_HLUT_ARRAY_SIZE)
- return -EINVAL;
-
- for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
- lut_dw[i] = rd32(hw, I40E_VFQF_HLUT(i));
- }
-
- return 0;
-}
-
-/**
* i40evf_config_rss - Configure RSS keys and lut
- * @vsi: Pointer to vsi structure
- * @seed: RSS hash seed
- * @lut: Lookup table
- * @lut_size: Lookup table size
- *
- * Returns 0 on success, negative on failure
- **/
-int i40evf_config_rss(struct i40e_vsi *vsi, const u8 *seed,
- u8 *lut, u16 lut_size)
-{
- struct i40evf_adapter *adapter = vsi->back;
-
- if (RSS_AQ(adapter))
- return i40evf_config_rss_aq(vsi, seed, lut, lut_size);
- else
- return i40evf_config_rss_reg(vsi, seed, lut, lut_size);
-}
-
-/**
- * i40evf_get_rss - Get RSS keys and lut
- * @vsi: Pointer to vsi structure
- * @seed: RSS hash seed
- * @lut: Lookup table
- * @lut_size: Lookup table size
+ * @adapter: board private structure
*
* Returns 0 on success, negative on failure
**/
-int i40evf_get_rss(struct i40e_vsi *vsi, const u8 *seed, u8 *lut, u16 lut_size)
+int i40evf_config_rss(struct i40evf_adapter *adapter)
{
- struct i40evf_adapter *adapter = vsi->back;
- if (RSS_AQ(adapter))
- return i40evf_get_rss_aq(vsi, seed, lut, lut_size);
- else
- return i40evf_get_rss_reg(vsi, seed, lut, lut_size);
+ if (RSS_PF(adapter)) {
+ adapter->aq_required |= I40EVF_FLAG_AQ_SET_RSS_LUT |
+ I40EVF_FLAG_AQ_SET_RSS_KEY;
+ return 0;
+ } else if (RSS_AQ(adapter)) {
+ return i40evf_config_rss_aq(adapter);
+ } else {
+ return i40evf_config_rss_reg(adapter);
+ }
}
/**
* i40evf_fill_rss_lut - Fill the lut with default values
- * @lut: Lookup table to be filled with
- * @rss_table_size: Lookup table size
- * @rss_size: Range of queue number for hashing
+ * @adapter: board private structure
**/
-static void i40evf_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
+static void i40evf_fill_rss_lut(struct i40evf_adapter *adapter)
{
u16 i;
- for (i = 0; i < rss_table_size; i++)
- lut[i] = i % rss_size;
+ for (i = 0; i < adapter->rss_lut_size; i++)
+ adapter->rss_lut[i] = i % adapter->num_active_queues;
}
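The default spread is plain round-robin. For example (illustrative sizes), with num_active_queues == 4 and a 64-entry table the LUT comes out as 0, 1, 2, 3, 0, 1, 2, 3, ... so hash buckets map evenly across the four queues.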
/**
@@ -1451,42 +1322,25 @@ static void i40evf_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
**/
static int i40evf_init_rss(struct i40evf_adapter *adapter)
{
- struct i40e_vsi *vsi = &adapter->vsi;
struct i40e_hw *hw = &adapter->hw;
- u8 seed[I40EVF_HKEY_ARRAY_SIZE];
- u64 hena;
- u8 *lut;
int ret;
- /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
- if (adapter->vf_res->vf_offload_flags &
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
- hena = I40E_DEFAULT_RSS_HENA_EXPANDED;
- else
- hena = I40E_DEFAULT_RSS_HENA;
- wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
- wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
+ if (!RSS_PF(adapter)) {
+ /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
+ if (adapter->vf_res->vf_offload_flags &
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
+ adapter->hena = I40E_DEFAULT_RSS_HENA_EXPANDED;
+ else
+ adapter->hena = I40E_DEFAULT_RSS_HENA;
- lut = kzalloc(I40EVF_HLUT_ARRAY_SIZE, GFP_KERNEL);
- if (!lut)
- return -ENOMEM;
+ wr32(hw, I40E_VFQF_HENA(0), (u32)adapter->hena);
+ wr32(hw, I40E_VFQF_HENA(1), (u32)(adapter->hena >> 32));
+ }
- /* Use user configured lut if there is one, otherwise use default */
- if (vsi->rss_lut_user)
- memcpy(lut, vsi->rss_lut_user, I40EVF_HLUT_ARRAY_SIZE);
- else
- i40evf_fill_rss_lut(lut, I40EVF_HLUT_ARRAY_SIZE,
- adapter->num_active_queues);
+ i40evf_fill_rss_lut(adapter);
- /* Use user configured hash key if there is one, otherwise
- * user default.
- */
- if (vsi->rss_hkey_user)
- memcpy(seed, vsi->rss_hkey_user, I40EVF_HKEY_ARRAY_SIZE);
- else
- netdev_rss_key_fill((void *)seed, I40EVF_HKEY_ARRAY_SIZE);
- ret = i40evf_config_rss(vsi, seed, lut, I40EVF_HLUT_ARRAY_SIZE);
- kfree(lut);
+ netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);
+ ret = i40evf_config_rss(adapter);
return ret;
}
@@ -1507,7 +1361,7 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
GFP_KERNEL);
if (!adapter->q_vectors)
- goto err_out;
+ return -ENOMEM;
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
q_vector = &adapter->q_vectors[q_idx];
@@ -1519,15 +1373,6 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
}
return 0;
-
-err_out:
- while (q_idx) {
- q_idx--;
- q_vector = &adapter->q_vectors[q_idx];
- netif_napi_del(&q_vector->napi);
- }
- kfree(adapter->q_vectors);
- return -ENOMEM;
}
/**
@@ -1610,19 +1455,16 @@ err_set_interrupt:
}
/**
- * i40evf_clear_rss_config_user - Clear user configurations of RSS
- * @vsi: Pointer to VSI structure
+ * i40evf_free_rss - Free memory used by RSS structs
+ * @adapter: board private structure
**/
-static void i40evf_clear_rss_config_user(struct i40e_vsi *vsi)
+static void i40evf_free_rss(struct i40evf_adapter *adapter)
{
- if (!vsi)
- return;
-
- kfree(vsi->rss_hkey_user);
- vsi->rss_hkey_user = NULL;
+ kfree(adapter->rss_key);
+ adapter->rss_key = NULL;
- kfree(vsi->rss_lut_user);
- vsi->rss_lut_user = NULL;
+ kfree(adapter->rss_lut);
+ adapter->rss_lut = NULL;
}
/**
@@ -1756,6 +1598,39 @@ static void i40evf_watchdog_task(struct work_struct *work)
adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_RSS;
goto watchdog_done;
}
+ if (adapter->aq_required & I40EVF_FLAG_AQ_GET_HENA) {
+ i40evf_get_hena(adapter);
+ goto watchdog_done;
+ }
+ if (adapter->aq_required & I40EVF_FLAG_AQ_SET_HENA) {
+ i40evf_set_hena(adapter);
+ goto watchdog_done;
+ }
+ if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_KEY) {
+ i40evf_set_rss_key(adapter);
+ goto watchdog_done;
+ }
+ if (adapter->aq_required & I40EVF_FLAG_AQ_SET_RSS_LUT) {
+ i40evf_set_rss_lut(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_PROMISC) {
+ i40evf_set_promiscuous(adapter, I40E_FLAG_VF_UNICAST_PROMISC |
+ I40E_FLAG_VF_MULTICAST_PROMISC);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_REQUEST_ALLMULTI) {
+ i40evf_set_promiscuous(adapter, I40E_FLAG_VF_MULTICAST_PROMISC);
+ goto watchdog_done;
+ }
+
+ if ((adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_PROMISC) &&
+ (adapter->aq_required & I40EVF_FLAG_AQ_RELEASE_ALLMULTI)) {
+ i40evf_set_promiscuous(adapter, 0);
+ goto watchdog_done;
+ }
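Note the pattern here: the watchdog services at most one pending admin-queue request per tick (every branch ends in goto watchdog_done), because the virtchnl layer allows only a single outstanding operation at a time; any remaining aq_required flags are picked up on later ticks.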
if (adapter->state == __I40EVF_RUNNING)
i40evf_request_stats(adapter);
@@ -2003,6 +1878,8 @@ static void i40evf_adminq_task(struct work_struct *work)
/* check for error indications */
val = rd32(hw, hw->aq.arq.len);
+ if (val == 0xdeadbeef) /* indicates device in reset */
+ goto freedom;
oldval = val;
if (val & I40E_VF_ARQLEN1_ARQVFE_MASK) {
dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
@@ -2259,6 +2136,28 @@ static int i40evf_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
+#define I40EVF_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_TX |\
+ NETIF_F_HW_VLAN_CTAG_RX |\
+ NETIF_F_HW_VLAN_CTAG_FILTER)
+
+/**
+ * i40evf_fix_features - fix up the netdev feature bits
+ * @netdev: our net device
+ * @features: desired feature bits
+ *
+ * Returns fixed-up features bits
+ **/
+static netdev_features_t i40evf_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ features &= ~I40EVF_VLAN_FEATURES;
+ if (adapter->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN)
+ features |= I40EVF_VLAN_FEATURES;
+ return features;
+}
+
static const struct net_device_ops i40evf_netdev_ops = {
.ndo_open = i40evf_open,
.ndo_stop = i40evf_close,
@@ -2271,6 +2170,7 @@ static const struct net_device_ops i40evf_netdev_ops = {
.ndo_tx_timeout = i40evf_tx_timeout,
.ndo_vlan_rx_add_vid = i40evf_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = i40evf_vlan_rx_kill_vid,
+ .ndo_fix_features = i40evf_fix_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = i40evf_netpoll,
#endif
@@ -2307,57 +2207,61 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw)
**/
int i40evf_process_config(struct i40evf_adapter *adapter)
{
+ struct i40e_virtchnl_vf_resource *vfres = adapter->vf_res;
struct net_device *netdev = adapter->netdev;
+ struct i40e_vsi *vsi = &adapter->vsi;
int i;
/* got VF config message back from PF, now we can parse it */
- for (i = 0; i < adapter->vf_res->num_vsis; i++) {
- if (adapter->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
- adapter->vsi_res = &adapter->vf_res->vsi_res[i];
+ for (i = 0; i < vfres->num_vsis; i++) {
+ if (vfres->vsi_res[i].vsi_type == I40E_VSI_SRIOV)
+ adapter->vsi_res = &vfres->vsi_res[i];
}
if (!adapter->vsi_res) {
dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
return -ENODEV;
}
- if (adapter->vf_res->vf_offload_flags
- & I40E_VIRTCHNL_VF_OFFLOAD_VLAN) {
- netdev->vlan_features = netdev->features &
- ~(NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER);
- netdev->features |= NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER;
- }
- netdev->features |= NETIF_F_HIGHDMA |
- NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_SCTP_CRC |
- NETIF_F_IPV6_CSUM |
- NETIF_F_TSO |
- NETIF_F_TSO6 |
- NETIF_F_TSO_ECN |
- NETIF_F_GSO_GRE |
- NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_RXCSUM |
- NETIF_F_GRO;
-
- netdev->hw_enc_features |= NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
- NETIF_F_TSO |
- NETIF_F_TSO6 |
- NETIF_F_TSO_ECN |
- NETIF_F_GSO_GRE |
- NETIF_F_GSO_UDP_TUNNEL |
- NETIF_F_GSO_UDP_TUNNEL_CSUM;
-
- if (adapter->flags & I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE)
- netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
-
- /* copy netdev features into list of user selectable features */
- netdev->hw_features |= netdev->features;
- netdev->hw_features &= ~NETIF_F_RXCSUM;
+ netdev->hw_enc_features |= NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_HIGHDMA |
+ NETIF_F_SOFT_FEATURES |
+ NETIF_F_TSO |
+ NETIF_F_TSO_ECN |
+ NETIF_F_TSO6 |
+ NETIF_F_GSO_GRE |
+ NETIF_F_GSO_GRE_CSUM |
+ NETIF_F_GSO_IPXIP4 |
+ NETIF_F_GSO_IPXIP6 |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_RXHASH |
+ NETIF_F_RXCSUM |
+ 0;
+
+ if (!(adapter->flags & I40EVF_FLAG_OUTER_UDP_CSUM_CAPABLE))
+ netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
+
+ netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
+
+ /* record features VLANs can make use of */
+ netdev->vlan_features |= netdev->hw_enc_features |
+ NETIF_F_TSO_MANGLEID;
+
+ /* Write features and hw_features separately to avoid polluting
+ * with, or dropping, features that are set when we registered.
+ */
+ netdev->hw_features |= netdev->hw_enc_features;
+
+ netdev->features |= netdev->hw_enc_features | I40EVF_VLAN_FEATURES;
+ netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
+
+ /* disable VLAN features if not supported */
+ if (!(vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_VLAN))
+ netdev->features ^= I40EVF_VLAN_FEATURES;
adapter->vsi.id = adapter->vsi_res->vsi_id;
@@ -2368,8 +2272,16 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
- adapter->vsi.netdev = adapter->netdev;
- adapter->vsi.qs_handle = adapter->vsi_res->qset_handle;
+ vsi->netdev = adapter->netdev;
+ vsi->qs_handle = adapter->vsi_res->qset_handle;
+ if (vfres->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ adapter->rss_key_size = vfres->rss_key_size;
+ adapter->rss_lut_size = vfres->rss_lut_size;
+ } else {
+ adapter->rss_key_size = I40EVF_HKEY_ARRAY_SIZE;
+ adapter->rss_lut_size = I40EVF_HLUT_ARRAY_SIZE;
+ }
+
return 0;
}
@@ -2502,11 +2414,6 @@ static void i40evf_init_task(struct work_struct *work)
adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
- adapter->flags |= I40EVF_FLAG_RX_1BUF_CAPABLE;
- adapter->flags |= I40EVF_FLAG_RX_PS_CAPABLE;
-
- /* Default to single buffer rx, can be changed through ethtool. */
- adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED;
netdev->netdev_ops = &i40evf_netdev_ops;
i40evf_set_ethtool_ops(netdev);
@@ -2565,6 +2472,11 @@ static void i40evf_init_task(struct work_struct *work)
set_bit(__I40E_DOWN, &adapter->vsi.state);
i40evf_misc_irq_enable(adapter);
+ adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
+ adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
+ if (!adapter->rss_key || !adapter->rss_lut)
+ goto err_mem;
+
if (RSS_AQ(adapter)) {
adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS;
mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
@@ -2575,7 +2487,8 @@ static void i40evf_init_task(struct work_struct *work)
restart:
schedule_delayed_work(&adapter->init_task, msecs_to_jiffies(30));
return;
-
+err_mem:
+ i40evf_free_rss(adapter);
err_register:
i40evf_free_misc_irq(adapter);
err_sw_init:
@@ -2838,11 +2751,11 @@ static void i40evf_remove(struct pci_dev *pdev)
adapter->state = __I40EVF_REMOVE;
adapter->aq_required = 0;
i40evf_request_reset(adapter);
- msleep(20);
+ msleep(50);
/* If the FW isn't responding, kick it once, but only once. */
if (!i40evf_asq_done(hw)) {
i40evf_request_reset(adapter);
- msleep(20);
+ msleep(50);
}
if (adapter->msix_entries) {
@@ -2857,8 +2770,7 @@ static void i40evf_remove(struct pci_dev *pdev)
flush_scheduled_work();
- /* Clear user configurations for RSS */
- i40evf_clear_rss_config_user(&adapter->vsi);
+ i40evf_free_rss(adapter);
if (hw->aq.asq.count)
i40evf_shutdown_adminq(hw);
@@ -2869,7 +2781,6 @@ static void i40evf_remove(struct pci_dev *pdev)
iounmap(hw->hw_addr);
pci_release_regions(pdev);
-
i40evf_free_all_tx_resources(adapter);
i40evf_free_all_rx_resources(adapter);
i40evf_free_queues(adapter);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 488e738f76c6..d76c221d4c8a 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -270,10 +270,6 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
vqpi->rxq.max_pkt_size = adapter->netdev->mtu
+ ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len;
- if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) {
- vqpi->rxq.splithdr_enabled = true;
- vqpi->rxq.hdr_size = I40E_RX_HDR_SIZE;
- }
vqpi++;
}
@@ -438,6 +434,8 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
ether_addr_copy(veal->list[i].addr, f->macaddr);
i++;
f->add = false;
+ if (i == count)
+ break;
}
}
if (!more)
@@ -501,6 +499,8 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
i++;
list_del(&f->list);
kfree(f);
+ if (i == count)
+ break;
}
}
if (!more)
@@ -564,6 +564,8 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
vvfl->vlan_id[i] = f->vlan;
i++;
f->add = false;
+ if (i == count)
+ break;
}
}
if (!more)
@@ -627,6 +629,8 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
i++;
list_del(&f->list);
kfree(f);
+ if (i == count)
+ break;
}
}
if (!more)
@@ -645,6 +649,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
{
struct i40e_virtchnl_promisc_info vpi;
+ int promisc_all;
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */
@@ -652,6 +657,27 @@ void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags)
adapter->current_op);
return;
}
+
+ promisc_all = I40E_FLAG_VF_UNICAST_PROMISC |
+ I40E_FLAG_VF_MULTICAST_PROMISC;
+ if ((flags & promisc_all) == promisc_all) {
+ adapter->flags |= I40EVF_FLAG_PROMISC_ON;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_PROMISC;
+ dev_info(&adapter->pdev->dev, "Entering promiscuous mode\n");
+ }
+
+ if (flags & I40E_FLAG_VF_MULTICAST_PROMISC) {
+ adapter->flags |= I40EVF_FLAG_ALLMULTI_ON;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_REQUEST_ALLMULTI;
+ dev_info(&adapter->pdev->dev, "Entering multicast promiscuous mode\n");
+ }
+
+ if (!flags) {
+ adapter->flags &= ~I40EVF_FLAG_PROMISC_ON;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_RELEASE_PROMISC;
+ dev_info(&adapter->pdev->dev, "Leaving promiscuous mode\n");
+ }
+
adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE;
vpi.vsi_id = adapter->vsi_res->vsi_id;
vpi.flags = flags;
@@ -681,6 +707,115 @@ void i40evf_request_stats(struct i40evf_adapter *adapter)
/* if the request failed, don't lock out others */
adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
}
+
+/**
+ * i40evf_get_hena
+ * @adapter: adapter structure
+ *
+ * Request hash enable capabilities from PF
+ **/
+void i40evf_get_hena(struct i40evf_adapter *adapter)
+{
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot get RSS hash capabilities, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+ adapter->current_op = I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_HENA;
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS,
+ NULL, 0);
+}
+
+/**
+ * i40evf_set_hena
+ * @adapter: adapter structure
+ *
+ * Request the PF to set our RSS hash capabilities
+ **/
+void i40evf_set_hena(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_rss_hena vrh;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot set RSS hash enable, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+ vrh.hena = adapter->hena;
+ adapter->current_op = I40E_VIRTCHNL_OP_SET_RSS_HENA;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_HENA;
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_SET_RSS_HENA,
+ (u8 *)&vrh, sizeof(vrh));
+}
+
+/**
+ * i40evf_set_rss_key
+ * @adapter: adapter structure
+ *
+ * Request the PF to set our RSS hash key
+ **/
+void i40evf_set_rss_key(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_rss_key *vrk;
+ int len;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot set RSS key, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+ len = sizeof(struct i40e_virtchnl_rss_key) +
+ (adapter->rss_key_size * sizeof(u8)) - 1;
+ vrk = kzalloc(len, GFP_KERNEL);
+ if (!vrk)
+ return;
+ vrk->vsi_id = adapter->vsi.id;
+ vrk->key_len = adapter->rss_key_size;
+ memcpy(vrk->key, adapter->rss_key, adapter->rss_key_size);
+
+ adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_RSS_KEY;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_KEY;
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
+ (u8 *)vrk, len);
+ kfree(vrk);
+}
+
+/**
+ * i40evf_set_rss_lut
+ * @adapter: adapter structure
+ *
+ * Request the PF to set our RSS lookup table
+ **/
+void i40evf_set_rss_lut(struct i40evf_adapter *adapter)
+{
+ struct i40e_virtchnl_rss_lut *vrl;
+ int len;
+
+ if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot set RSS LUT, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+ len = sizeof(struct i40e_virtchnl_rss_lut) +
+ (adapter->rss_lut_size * sizeof(u8)) - 1;
+ vrl = kzalloc(len, GFP_KERNEL);
+ if (!vrl)
+ return;
+ vrl->vsi_id = adapter->vsi.id;
+ vrl->lut_entries = adapter->rss_lut_size;
+ memcpy(vrl->lut, adapter->rss_lut, adapter->rss_lut_size);
+ adapter->current_op = I40E_VIRTCHNL_OP_CONFIG_RSS_LUT;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_SET_RSS_LUT;
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
+ (u8 *)vrl, len);
+ kfree(vrl);
+}
+
/**
* i40evf_request_reset
* @adapter: adapter structure
@@ -820,6 +955,16 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
if (v_opcode != adapter->current_op)
return;
break;
+ case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS: {
+ struct i40e_virtchnl_rss_hena *vrh =
+ (struct i40e_virtchnl_rss_hena *)msg;
+ if (msglen == sizeof(*vrh))
+ adapter->hena = vrh->hena;
+ else
+ dev_warn(&adapter->pdev->dev,
+ "Invalid message %d from PF\n", v_opcode);
+ }
+ break;
default:
if (v_opcode != adapter->current_op)
dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index a23aa6704394..a61447fd778e 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -361,7 +361,7 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
if (size > 15)
size = 15;
- nvm->word_size = 1 << size;
+ nvm->word_size = BIT(size);
nvm->opcode_bits = 8;
nvm->delay_usec = 1;
@@ -380,7 +380,7 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
16 : 8;
break;
}
- if (nvm->word_size == (1 << 15))
+ if (nvm->word_size == BIT(15))
nvm->page_size = 128;
nvm->type = e1000_nvm_eeprom_spi;
@@ -391,7 +391,7 @@ static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
nvm->ops.write = igb_write_nvm_spi;
nvm->ops.validate = igb_validate_nvm_checksum;
nvm->ops.update = igb_update_nvm_checksum;
- if (nvm->word_size < (1 << 15))
+ if (nvm->word_size < BIT(15))
nvm->ops.read = igb_read_nvm_eerd;
else
nvm->ops.read = igb_read_nvm_spi;
@@ -2107,7 +2107,7 @@ void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
/* The PF can spoof - it has to in order to
* support emulation mode NICs
*/
- reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
+ reg_val ^= (BIT(pf) | BIT(pf + MAX_NUM_VFS));
} else {
reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
E1000_DTXSWC_VLAN_SPOOF_MASK);
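The mechanical (1 << n) to BIT(n) conversions in this series lean on the
kernel's bit helper, which shifts an unsigned long and therefore stays
defined even for bit 31, where (1 << 31) shifts into the sign bit of a
signed int. Roughly, from include/linux/bitops.h:

	#define BIT(nr)		(1UL << (nr))

	/* e.g. the anti-spoofing update above: */
	reg_val ^= (BIT(pf) | BIT(pf + MAX_NUM_VFS));
	/* same value as (1 << pf | 1 << (pf + MAX_NUM_VFS)), minus the UB */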
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index de8805a2a2fe..199ff98209cf 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -168,16 +168,16 @@ struct e1000_adv_tx_context_desc {
#define E1000_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */
#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
-#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
-#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
-#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
-#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */
+#define E1000_DCA_RXCTRL_DESC_DCA_EN BIT(5) /* DCA Rx Desc enable */
+#define E1000_DCA_RXCTRL_HEAD_DCA_EN BIT(6) /* DCA Rx Desc header enable */
+#define E1000_DCA_RXCTRL_DATA_DCA_EN BIT(7) /* DCA Rx Desc payload enable */
+#define E1000_DCA_RXCTRL_DESC_RRO_EN BIT(9) /* DCA Rx rd Desc Relax Order */
#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
-#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
-#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
-#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
-#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
+#define E1000_DCA_TXCTRL_DESC_DCA_EN BIT(5) /* DCA Tx Desc enable */
+#define E1000_DCA_TXCTRL_DESC_RRO_EN BIT(9) /* Tx rd Desc Relax Order */
+#define E1000_DCA_TXCTRL_TX_WB_RO_EN BIT(11) /* Tx Desc writeback RO bit */
+#define E1000_DCA_TXCTRL_DATA_RRO_EN BIT(13) /* Tx rd data Relax Order */
/* Additional DCA related definitions, note change in position of CPUID */
#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
@@ -186,8 +186,8 @@ struct e1000_adv_tx_context_desc {
#define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */
/* ETQF register bit definitions */
-#define E1000_ETQF_FILTER_ENABLE (1 << 26)
-#define E1000_ETQF_1588 (1 << 30)
+#define E1000_ETQF_FILTER_ENABLE BIT(26)
+#define E1000_ETQF_1588 BIT(30)
/* FTQF register bit definitions */
#define E1000_FTQF_VF_BP 0x00008000
@@ -203,16 +203,16 @@ struct e1000_adv_tx_context_desc {
#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof control */
#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */
#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8
-#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */
+#define E1000_DTXSWC_VMDQ_LOOPBACK_EN BIT(31) /* global VF LB enable */
/* Easy defines for setting default pool, would normally be left a zero */
#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7
#define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT)
/* Other useful VMD_CTL register defines */
-#define E1000_VT_CTL_IGNORE_MAC (1 << 28)
-#define E1000_VT_CTL_DISABLE_DEF_POOL (1 << 29)
-#define E1000_VT_CTL_VM_REPL_EN (1 << 30)
+#define E1000_VT_CTL_IGNORE_MAC BIT(28)
+#define E1000_VT_CTL_DISABLE_DEF_POOL BIT(29)
+#define E1000_VT_CTL_VM_REPL_EN BIT(30)
/* Per VM Offload register setup */
#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */
@@ -252,7 +252,7 @@ struct e1000_adv_tx_context_desc {
#define E1000_DTXCTL_MDP_EN 0x0020
#define E1000_DTXCTL_SPOOF_INT 0x0040
-#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT (1 << 14)
+#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT BIT(14)
#define ALL_QUEUES 0xFFFF
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index e9f23ee8f15e..2997c443c5dc 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -530,65 +530,65 @@
/* Time Sync Interrupt Cause/Mask Register Bits */
-#define TSINTR_SYS_WRAP (1 << 0) /* SYSTIM Wrap around. */
-#define TSINTR_TXTS (1 << 1) /* Transmit Timestamp. */
-#define TSINTR_RXTS (1 << 2) /* Receive Timestamp. */
-#define TSINTR_TT0 (1 << 3) /* Target Time 0 Trigger. */
-#define TSINTR_TT1 (1 << 4) /* Target Time 1 Trigger. */
-#define TSINTR_AUTT0 (1 << 5) /* Auxiliary Timestamp 0 Taken. */
-#define TSINTR_AUTT1 (1 << 6) /* Auxiliary Timestamp 1 Taken. */
-#define TSINTR_TADJ (1 << 7) /* Time Adjust Done. */
+#define TSINTR_SYS_WRAP BIT(0) /* SYSTIM Wrap around. */
+#define TSINTR_TXTS BIT(1) /* Transmit Timestamp. */
+#define TSINTR_RXTS BIT(2) /* Receive Timestamp. */
+#define TSINTR_TT0 BIT(3) /* Target Time 0 Trigger. */
+#define TSINTR_TT1 BIT(4) /* Target Time 1 Trigger. */
+#define TSINTR_AUTT0 BIT(5) /* Auxiliary Timestamp 0 Taken. */
+#define TSINTR_AUTT1 BIT(6) /* Auxiliary Timestamp 1 Taken. */
+#define TSINTR_TADJ BIT(7) /* Time Adjust Done. */
#define TSYNC_INTERRUPTS TSINTR_TXTS
#define E1000_TSICR_TXTS TSINTR_TXTS
/* TSAUXC Configuration Bits */
-#define TSAUXC_EN_TT0 (1 << 0) /* Enable target time 0. */
-#define TSAUXC_EN_TT1 (1 << 1) /* Enable target time 1. */
-#define TSAUXC_EN_CLK0 (1 << 2) /* Enable Configurable Frequency Clock 0. */
-#define TSAUXC_SAMP_AUT0 (1 << 3) /* Latch SYSTIML/H into AUXSTMPL/0. */
-#define TSAUXC_ST0 (1 << 4) /* Start Clock 0 Toggle on Target Time 0. */
-#define TSAUXC_EN_CLK1 (1 << 5) /* Enable Configurable Frequency Clock 1. */
-#define TSAUXC_SAMP_AUT1 (1 << 6) /* Latch SYSTIML/H into AUXSTMPL/1. */
-#define TSAUXC_ST1 (1 << 7) /* Start Clock 1 Toggle on Target Time 1. */
-#define TSAUXC_EN_TS0 (1 << 8) /* Enable hardware timestamp 0. */
-#define TSAUXC_AUTT0 (1 << 9) /* Auxiliary Timestamp Taken. */
-#define TSAUXC_EN_TS1 (1 << 10) /* Enable hardware timestamp 0. */
-#define TSAUXC_AUTT1 (1 << 11) /* Auxiliary Timestamp Taken. */
-#define TSAUXC_PLSG (1 << 17) /* Generate a pulse. */
-#define TSAUXC_DISABLE (1 << 31) /* Disable SYSTIM Count Operation. */
+#define TSAUXC_EN_TT0 BIT(0) /* Enable target time 0. */
+#define TSAUXC_EN_TT1 BIT(1) /* Enable target time 1. */
+#define TSAUXC_EN_CLK0 BIT(2) /* Enable Configurable Frequency Clock 0. */
+#define TSAUXC_SAMP_AUT0 BIT(3) /* Latch SYSTIML/H into AUXSTMPL/0. */
+#define TSAUXC_ST0 BIT(4) /* Start Clock 0 Toggle on Target Time 0. */
+#define TSAUXC_EN_CLK1 BIT(5) /* Enable Configurable Frequency Clock 1. */
+#define TSAUXC_SAMP_AUT1 BIT(6) /* Latch SYSTIML/H into AUXSTMPL/1. */
+#define TSAUXC_ST1 BIT(7) /* Start Clock 1 Toggle on Target Time 1. */
+#define TSAUXC_EN_TS0 BIT(8) /* Enable hardware timestamp 0. */
+#define TSAUXC_AUTT0 BIT(9) /* Auxiliary Timestamp 0 Taken. */
+#define TSAUXC_EN_TS1 BIT(10) /* Enable hardware timestamp 1. */
+#define TSAUXC_AUTT1 BIT(11) /* Auxiliary Timestamp 1 Taken. */
+#define TSAUXC_PLSG BIT(17) /* Generate a pulse. */
+#define TSAUXC_DISABLE BIT(31) /* Disable SYSTIM Count Operation. */
/* SDP Configuration Bits */
-#define AUX0_SEL_SDP0 (0 << 0) /* Assign SDP0 to auxiliary time stamp 0. */
-#define AUX0_SEL_SDP1 (1 << 0) /* Assign SDP1 to auxiliary time stamp 0. */
-#define AUX0_SEL_SDP2 (2 << 0) /* Assign SDP2 to auxiliary time stamp 0. */
-#define AUX0_SEL_SDP3 (3 << 0) /* Assign SDP3 to auxiliary time stamp 0. */
-#define AUX0_TS_SDP_EN (1 << 2) /* Enable auxiliary time stamp trigger 0. */
-#define AUX1_SEL_SDP0 (0 << 3) /* Assign SDP0 to auxiliary time stamp 1. */
-#define AUX1_SEL_SDP1 (1 << 3) /* Assign SDP1 to auxiliary time stamp 1. */
-#define AUX1_SEL_SDP2 (2 << 3) /* Assign SDP2 to auxiliary time stamp 1. */
-#define AUX1_SEL_SDP3 (3 << 3) /* Assign SDP3 to auxiliary time stamp 1. */
-#define AUX1_TS_SDP_EN (1 << 5) /* Enable auxiliary time stamp trigger 1. */
-#define TS_SDP0_SEL_TT0 (0 << 6) /* Target time 0 is output on SDP0. */
-#define TS_SDP0_SEL_TT1 (1 << 6) /* Target time 1 is output on SDP0. */
-#define TS_SDP0_SEL_FC0 (2 << 6) /* Freq clock 0 is output on SDP0. */
-#define TS_SDP0_SEL_FC1 (3 << 6) /* Freq clock 1 is output on SDP0. */
-#define TS_SDP0_EN (1 << 8) /* SDP0 is assigned to Tsync. */
-#define TS_SDP1_SEL_TT0 (0 << 9) /* Target time 0 is output on SDP1. */
-#define TS_SDP1_SEL_TT1 (1 << 9) /* Target time 1 is output on SDP1. */
-#define TS_SDP1_SEL_FC0 (2 << 9) /* Freq clock 0 is output on SDP1. */
-#define TS_SDP1_SEL_FC1 (3 << 9) /* Freq clock 1 is output on SDP1. */
-#define TS_SDP1_EN (1 << 11) /* SDP1 is assigned to Tsync. */
-#define TS_SDP2_SEL_TT0 (0 << 12) /* Target time 0 is output on SDP2. */
-#define TS_SDP2_SEL_TT1 (1 << 12) /* Target time 1 is output on SDP2. */
-#define TS_SDP2_SEL_FC0 (2 << 12) /* Freq clock 0 is output on SDP2. */
-#define TS_SDP2_SEL_FC1 (3 << 12) /* Freq clock 1 is output on SDP2. */
-#define TS_SDP2_EN (1 << 14) /* SDP2 is assigned to Tsync. */
-#define TS_SDP3_SEL_TT0 (0 << 15) /* Target time 0 is output on SDP3. */
-#define TS_SDP3_SEL_TT1 (1 << 15) /* Target time 1 is output on SDP3. */
-#define TS_SDP3_SEL_FC0 (2 << 15) /* Freq clock 0 is output on SDP3. */
-#define TS_SDP3_SEL_FC1 (3 << 15) /* Freq clock 1 is output on SDP3. */
-#define TS_SDP3_EN (1 << 17) /* SDP3 is assigned to Tsync. */
+#define AUX0_SEL_SDP0 (0u << 0) /* Assign SDP0 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP1 (1u << 0) /* Assign SDP1 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP2 (2u << 0) /* Assign SDP2 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP3 (3u << 0) /* Assign SDP3 to auxiliary time stamp 0. */
+#define AUX0_TS_SDP_EN (1u << 2) /* Enable auxiliary time stamp trigger 0. */
+#define AUX1_SEL_SDP0 (0u << 3) /* Assign SDP0 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP1 (1u << 3) /* Assign SDP1 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP2 (2u << 3) /* Assign SDP2 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP3 (3u << 3) /* Assign SDP3 to auxiliary time stamp 1. */
+#define AUX1_TS_SDP_EN (1u << 5) /* Enable auxiliary time stamp trigger 1. */
+#define TS_SDP0_SEL_TT0 (0u << 6) /* Target time 0 is output on SDP0. */
+#define TS_SDP0_SEL_TT1 (1u << 6) /* Target time 1 is output on SDP0. */
+#define TS_SDP0_SEL_FC0 (2u << 6) /* Freq clock 0 is output on SDP0. */
+#define TS_SDP0_SEL_FC1 (3u << 6) /* Freq clock 1 is output on SDP0. */
+#define TS_SDP0_EN (1u << 8) /* SDP0 is assigned to Tsync. */
+#define TS_SDP1_SEL_TT0 (0u << 9) /* Target time 0 is output on SDP1. */
+#define TS_SDP1_SEL_TT1 (1u << 9) /* Target time 1 is output on SDP1. */
+#define TS_SDP1_SEL_FC0 (2u << 9) /* Freq clock 0 is output on SDP1. */
+#define TS_SDP1_SEL_FC1 (3u << 9) /* Freq clock 1 is output on SDP1. */
+#define TS_SDP1_EN (1u << 11) /* SDP1 is assigned to Tsync. */
+#define TS_SDP2_SEL_TT0 (0u << 12) /* Target time 0 is output on SDP2. */
+#define TS_SDP2_SEL_TT1 (1u << 12) /* Target time 1 is output on SDP2. */
+#define TS_SDP2_SEL_FC0 (2u << 12) /* Freq clock 0 is output on SDP2. */
+#define TS_SDP2_SEL_FC1 (3u << 12) /* Freq clock 1 is output on SDP2. */
+#define TS_SDP2_EN (1u << 14) /* SDP2 is assigned to Tsync. */
+#define TS_SDP3_SEL_TT0 (0u << 15) /* Target time 0 is output on SDP3. */
+#define TS_SDP3_SEL_TT1 (1u << 15) /* Target time 1 is output on SDP3. */
+#define TS_SDP3_SEL_FC0 (2u << 15) /* Freq clock 0 is output on SDP3. */
+#define TS_SDP3_SEL_FC1 (3u << 15) /* Freq clock 1 is output on SDP3. */
+#define TS_SDP3_EN (1u << 17) /* SDP3 is assigned to Tsync. */
#define E1000_MDICNFG_EXT_MDIO 0x80000000 /* MDI ext/int destination */
#define E1000_MDICNFG_COM_MDIO 0x40000000 /* MDI shared w/ lan 0 */
@@ -997,8 +997,8 @@
#define E1000_M88E1543_FIBER_CTRL 0x0
#define E1000_EEE_ADV_DEV_I354 7
#define E1000_EEE_ADV_ADDR_I354 60
-#define E1000_EEE_ADV_100_SUPPORTED (1 << 1) /* 100BaseTx EEE Supported */
-#define E1000_EEE_ADV_1000_SUPPORTED (1 << 2) /* 1000BaseT EEE Supported */
+#define E1000_EEE_ADV_100_SUPPORTED BIT(1) /* 100BaseTx EEE Supported */
+#define E1000_EEE_ADV_1000_SUPPORTED BIT(2) /* 1000BaseT EEE Supported */
#define E1000_PCS_STATUS_DEV_I354 3
#define E1000_PCS_STATUS_ADDR_I354 1
#define E1000_PCS_STATUS_TX_LPI_IND 0x0200 /* Tx in LPI state */
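The SDP definitions above cannot use BIT() because several of them are
multi-bit field values (0..3 at a given shift), so they take a u suffix
instead; the effect is the same, an unsigned shift. For example:

	#define TS_SDP0_SEL_FC1	(3u << 6)	/* two-bit field, value 3 */

	/* 1 << 31 on a 32-bit int is undefined behaviour (sign bit);
	 * 1u << 31 is well-defined and yields 0x80000000.
	 */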
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 07cf4fe58338..5010e2232c50 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -212,7 +212,7 @@ s32 igb_vfta_set(struct e1000_hw *hw, u32 vlan, u32 vind,
* bits[4-0]: which bit in the register
*/
regidx = vlan / 32;
- vfta_delta = 1 << (vlan % 32);
+ vfta_delta = BIT(vlan % 32);
vfta = adapter->shadow_vfta[regidx];
/* vfta_delta represents the difference between the current value
@@ -243,12 +243,12 @@ s32 igb_vfta_set(struct e1000_hw *hw, u32 vlan, u32 vind,
bits = rd32(E1000_VLVF(vlvf_index));
/* set the pool bit */
- bits |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vind);
+ bits |= BIT(E1000_VLVF_POOLSEL_SHIFT + vind);
if (vlan_on)
goto vlvf_update;
/* clear the pool bit */
- bits ^= 1 << (E1000_VLVF_POOLSEL_SHIFT + vind);
+ bits ^= BIT(E1000_VLVF_POOLSEL_SHIFT + vind);
if (!(bits & E1000_VLVF_POOLSEL_MASK)) {
/* Clear VFTA first, then disable VLVF. Otherwise
@@ -427,7 +427,7 @@ void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
mta = array_rd32(E1000_MTA, hash_reg);
- mta |= (1 << hash_bit);
+ mta |= BIT(hash_bit);
array_wr32(E1000_MTA, hash_reg, mta);
wrfl();
@@ -527,7 +527,7 @@ void igb_update_mc_addr_list(struct e1000_hw *hw,
hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
hash_bit = hash_value & 0x1F;
- hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
+ hw->mac.mta_shadow[hash_reg] |= BIT(hash_bit);
mc_addr_list += (ETH_ALEN);
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
index 10f5c9e016a9..00e263f0c030 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c
@@ -302,9 +302,9 @@ static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
u32 vflre = rd32(E1000_VFLRE);
s32 ret_val = -E1000_ERR_MBX;
- if (vflre & (1 << vf_number)) {
+ if (vflre & BIT(vf_number)) {
ret_val = 0;
- wr32(E1000_VFLRE, (1 << vf_number));
+ wr32(E1000_VFLRE, BIT(vf_number));
hw->mbx.stats.rsts++;
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index e8280d0d7f02..3582c5cf8843 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -72,7 +72,7 @@ static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
u32 eecd = rd32(E1000_EECD);
u32 mask;
- mask = 0x01 << (count - 1);
+ mask = 1u << (count - 1);
if (nvm->type == e1000_nvm_eeprom_spi)
eecd |= E1000_EECD_DO;
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 969a6ddafa3b..9b622b33bb5a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -91,10 +91,10 @@ s32 igb_check_polarity_m88(struct e1000_hw *hw);
#define I82580_ADDR_REG 16
#define I82580_CFG_REG 22
-#define I82580_CFG_ASSERT_CRS_ON_TX (1 << 15)
-#define I82580_CFG_ENABLE_DOWNSHIFT (3 << 10) /* auto downshift 100/10 */
+#define I82580_CFG_ASSERT_CRS_ON_TX BIT(15)
+#define I82580_CFG_ENABLE_DOWNSHIFT (3u << 10) /* auto downshift 100/10 */
#define I82580_CTRL_REG 23
-#define I82580_CTRL_DOWNSHIFT_MASK (7 << 10)
+#define I82580_CTRL_DOWNSHIFT_MASK (7u << 10)
/* 82580 specific PHY registers */
#define I82580_PHY_CTRL_2 18
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 9413fa61392f..5387b3a96489 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -91,6 +91,14 @@ struct igb_adapter;
#define NVM_COMB_VER_OFF 0x0083
#define NVM_COMB_VER_PTR 0x003d
+/* Transmit and receive latency (for PTP timestamps) */
+#define IGB_I210_TX_LATENCY_10 9542
+#define IGB_I210_TX_LATENCY_100 1024
+#define IGB_I210_TX_LATENCY_1000 178
+#define IGB_I210_RX_LATENCY_10 20662
+#define IGB_I210_RX_LATENCY_100 2213
+#define IGB_I210_RX_LATENCY_1000 448
+
struct vf_data_storage {
unsigned char vf_mac_addresses[ETH_ALEN];
u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
@@ -169,7 +177,7 @@ enum igb_tx_flags {
* maintain a power of two alignment we have to limit ourselves to 32K.
*/
#define IGB_MAX_TXD_PWR 15
-#define IGB_MAX_DATA_PER_TXD (1 << IGB_MAX_TXD_PWR)
+#define IGB_MAX_DATA_PER_TXD (1u << IGB_MAX_TXD_PWR)
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)
@@ -437,6 +445,7 @@ struct igb_adapter {
unsigned long ptp_tx_start;
unsigned long last_rx_ptp_check;
unsigned long last_rx_timestamp;
+ unsigned int ptp_flags;
spinlock_t tmreg_lock;
struct cyclecounter cc;
struct timecounter tc;
@@ -466,21 +475,24 @@ struct igb_adapter {
u16 eee_advert;
};
-#define IGB_FLAG_HAS_MSI (1 << 0)
-#define IGB_FLAG_DCA_ENABLED (1 << 1)
-#define IGB_FLAG_QUAD_PORT_A (1 << 2)
-#define IGB_FLAG_QUEUE_PAIRS (1 << 3)
-#define IGB_FLAG_DMAC (1 << 4)
-#define IGB_FLAG_PTP (1 << 5)
-#define IGB_FLAG_RSS_FIELD_IPV4_UDP (1 << 6)
-#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 7)
-#define IGB_FLAG_WOL_SUPPORTED (1 << 8)
-#define IGB_FLAG_NEED_LINK_UPDATE (1 << 9)
-#define IGB_FLAG_MEDIA_RESET (1 << 10)
-#define IGB_FLAG_MAS_CAPABLE (1 << 11)
-#define IGB_FLAG_MAS_ENABLE (1 << 12)
-#define IGB_FLAG_HAS_MSIX (1 << 13)
-#define IGB_FLAG_EEE (1 << 14)
+/* flags controlling PTP/1588 function */
+#define IGB_PTP_ENABLED BIT(0)
+#define IGB_PTP_OVERFLOW_CHECK BIT(1)
+
+#define IGB_FLAG_HAS_MSI BIT(0)
+#define IGB_FLAG_DCA_ENABLED BIT(1)
+#define IGB_FLAG_QUAD_PORT_A BIT(2)
+#define IGB_FLAG_QUEUE_PAIRS BIT(3)
+#define IGB_FLAG_DMAC BIT(4)
+#define IGB_FLAG_RSS_FIELD_IPV4_UDP BIT(6)
+#define IGB_FLAG_RSS_FIELD_IPV6_UDP BIT(7)
+#define IGB_FLAG_WOL_SUPPORTED BIT(8)
+#define IGB_FLAG_NEED_LINK_UPDATE BIT(9)
+#define IGB_FLAG_MEDIA_RESET BIT(10)
+#define IGB_FLAG_MAS_CAPABLE BIT(11)
+#define IGB_FLAG_MAS_ENABLE BIT(12)
+#define IGB_FLAG_HAS_MSIX BIT(13)
+#define IGB_FLAG_EEE BIT(14)
#define IGB_FLAG_VLAN_PROMISC BIT(15)
/* Media Auto Sense */
@@ -538,6 +550,7 @@ void igb_set_fw_version(struct igb_adapter *);
void igb_ptp_init(struct igb_adapter *adapter);
void igb_ptp_stop(struct igb_adapter *adapter);
void igb_ptp_reset(struct igb_adapter *adapter);
+void igb_ptp_suspend(struct igb_adapter *adapter);
void igb_ptp_rx_hang(struct igb_adapter *adapter);
void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va,
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 7982243d1f9b..64e91c575a39 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -466,7 +466,7 @@ static void igb_get_regs(struct net_device *netdev,
memset(p, 0, IGB_REGS_LEN * sizeof(u32));
- regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
+ regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id;
/* General Registers */
regs_buff[0] = rd32(E1000_CTRL);
@@ -1448,7 +1448,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
/* Test each interrupt */
for (; i < 31; i++) {
/* Interrupt to test */
- mask = 1 << i;
+ mask = BIT(i);
if (!(mask & ics_mask))
continue;
@@ -2411,19 +2411,19 @@ static int igb_get_ts_info(struct net_device *dev,
SOF_TIMESTAMPING_RAW_HARDWARE;
info->tx_types =
- (1 << HWTSTAMP_TX_OFF) |
- (1 << HWTSTAMP_TX_ON);
+ BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
- info->rx_filters = 1 << HWTSTAMP_FILTER_NONE;
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);
/* 82576 does not support timestamping all packets. */
if (adapter->hw.mac.type >= e1000_82580)
- info->rx_filters |= 1 << HWTSTAMP_FILTER_ALL;
+ info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
else
info->rx_filters |=
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
return 0;
default:
@@ -2831,7 +2831,8 @@ static int igb_get_module_eeprom(struct net_device *netdev,
/* Read EEPROM block, SFF-8079/SFF-8472, word at a time */
for (i = 0; i < last_word - first_word + 1; i++) {
- status = igb_read_phy_reg_i2c(hw, first_word + i, &dataword[i]);
+ status = igb_read_phy_reg_i2c(hw, (first_word + i) * 2,
+ &dataword[i]);
if (status) {
/* Error occurred while reading module */
kfree(dataword);
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 55a1405cb2a1..942a89fb0090 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -50,6 +50,7 @@
#include <linux/aer.h>
#include <linux/prefetch.h>
#include <linux/pm_runtime.h>
+#include <linux/etherdevice.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
@@ -150,7 +151,7 @@ static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static int igb_poll(struct napi_struct *, int);
-static bool igb_clean_tx_irq(struct igb_q_vector *);
+static bool igb_clean_tx_irq(struct igb_q_vector *, int);
static int igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
@@ -382,7 +383,7 @@ static void igb_dump(struct igb_adapter *adapter)
dev_info(&adapter->pdev->dev, "Net device Info\n");
pr_info("Device Name state trans_start last_rx\n");
pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
- netdev->state, netdev->trans_start, netdev->last_rx);
+ netdev->state, dev_trans_start(netdev), netdev->last_rx);
}
/* Print Registers */
@@ -835,7 +836,7 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
igb_write_ivar(hw, msix_vector,
tx_queue & 0x7,
((tx_queue & 0x8) << 1) + 8);
- q_vector->eims_value = 1 << msix_vector;
+ q_vector->eims_value = BIT(msix_vector);
break;
case e1000_82580:
case e1000_i350:
@@ -856,7 +857,7 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
igb_write_ivar(hw, msix_vector,
tx_queue >> 1,
((tx_queue & 0x1) << 4) + 8);
- q_vector->eims_value = 1 << msix_vector;
+ q_vector->eims_value = BIT(msix_vector);
break;
default:
BUG();
@@ -918,7 +919,7 @@ static void igb_configure_msix(struct igb_adapter *adapter)
E1000_GPIE_NSICR);
/* enable msix_other interrupt */
- adapter->eims_other = 1 << vector;
+ adapter->eims_other = BIT(vector);
tmp = (vector++ | E1000_IVAR_VALID) << 8;
wr32(E1000_IVAR_MISC, tmp);
@@ -2026,7 +2027,8 @@ void igb_reset(struct igb_adapter *adapter)
wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
/* Re-enable PTP, where applicable. */
- igb_ptp_reset(adapter);
+ if (adapter->ptp_flags & IGB_PTP_ENABLED)
+ igb_ptp_reset(adapter);
igb_get_phy_info(hw);
}
@@ -2086,6 +2088,40 @@ static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
}
+#define IGB_MAX_MAC_HDR_LEN 127
+#define IGB_MAX_NETWORK_HDR_LEN 511
+
+static netdev_features_t
+igb_features_check(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features)
+{
+ unsigned int network_hdr_len, mac_hdr_len;
+
+ /* Make certain the headers can be described by a context descriptor */
+ mac_hdr_len = skb_network_header(skb) - skb->data;
+ if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN))
+ return features & ~(NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+
+ network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
+ if (unlikely(network_hdr_len > IGB_MAX_NETWORK_HDR_LEN))
+ return features & ~(NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+
+ /* We can only support IPV4 TSO in tunnels if we can mangle the
+ * inner IP ID field, so strip TSO if MANGLEID is not supported.
+ */
+ if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
+ features &= ~NETIF_F_TSO;
+
+ return features;
+}
+
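The 127/511 limits track the field widths of the advanced Tx context
descriptor that igb_tx_ctxtdesc() programs: MACLEN is a 7-bit field and
IPLEN a 9-bit field, assuming the usual vlan_macip_lens packing:

	/* bits 31:16 VLAN | 15:9 MACLEN | 8:0 IPLEN */
	vlan_macip_lens  = ip_hdr_len;				   /* <= 511 */
	vlan_macip_lens |= mac_hdr_len << E1000_ADVTXD_MACLEN_SHIFT; /* <= 127 */

A frame whose headers do not fit those fields has the offload features
stripped and falls back to software checksum/TSO rather than programming a
corrupt descriptor.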
static const struct net_device_ops igb_netdev_ops = {
.ndo_open = igb_open,
.ndo_stop = igb_close,
@@ -2110,7 +2146,7 @@ static const struct net_device_ops igb_netdev_ops = {
.ndo_fix_features = igb_fix_features,
.ndo_set_features = igb_set_features,
.ndo_fdb_add = igb_ndo_fdb_add,
- .ndo_features_check = passthru_features_check,
+ .ndo_features_check = igb_features_check,
};
/**
@@ -2288,9 +2324,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
- err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
- IORESOURCE_MEM),
- igb_driver_name);
+ err = pci_request_mem_regions(pdev, igb_driver_name);
if (err)
goto err_pci_reg;
@@ -2376,38 +2410,43 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_TSO6 |
NETIF_F_RXHASH |
NETIF_F_RXCSUM |
- NETIF_F_HW_CSUM |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_TX;
+ NETIF_F_HW_CSUM;
if (hw->mac.type >= e1000_82576)
netdev->features |= NETIF_F_SCTP_CRC;
+#define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+ NETIF_F_GSO_GRE_CSUM | \
+ NETIF_F_GSO_IPXIP4 | \
+ NETIF_F_GSO_IPXIP6 | \
+ NETIF_F_GSO_UDP_TUNNEL | \
+ NETIF_F_GSO_UDP_TUNNEL_CSUM)
+
+ netdev->gso_partial_features = IGB_GSO_PARTIAL_FEATURES;
+ netdev->features |= NETIF_F_GSO_PARTIAL | IGB_GSO_PARTIAL_FEATURES;
+
/* copy netdev features into list of user selectable features */
- netdev->hw_features |= netdev->features;
- netdev->hw_features |= NETIF_F_RXALL;
+ netdev->hw_features |= netdev->features |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_RXALL;
if (hw->mac.type >= e1000_i350)
netdev->hw_features |= NETIF_F_NTUPLE;
- /* set this bit last since it cannot be part of hw_features */
- netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
-
- netdev->vlan_features |= NETIF_F_SG |
- NETIF_F_TSO |
- NETIF_F_TSO6 |
- NETIF_F_HW_CSUM |
- NETIF_F_SCTP_CRC;
+ if (pci_using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
netdev->mpls_features |= NETIF_F_HW_CSUM;
- netdev->hw_enc_features |= NETIF_F_HW_CSUM;
+ netdev->hw_enc_features |= netdev->vlan_features;
- netdev->priv_flags |= IFF_SUPP_NOFCS;
+ /* set this bit last since it cannot be part of vlan_features */
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX;
- if (pci_using_dac) {
- netdev->features |= NETIF_F_HIGHDMA;
- netdev->vlan_features |= NETIF_F_HIGHDMA;
- }
+ netdev->priv_flags |= IFF_SUPP_NOFCS;
netdev->priv_flags |= IFF_UNICAST_FLT;
@@ -2442,9 +2481,11 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
break;
}
- /* copy the MAC address out of the NVM */
- if (hw->mac.ops.read_mac_addr(hw))
- dev_err(&pdev->dev, "NVM Read Error\n");
+ if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
+ /* copy the MAC address out of the NVM */
+ if (hw->mac.ops.read_mac_addr(hw))
+ dev_err(&pdev->dev, "NVM Read Error\n");
+ }
memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
@@ -2707,8 +2748,7 @@ err_sw_init:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
- pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
pci_disable_device(pdev);
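pci_request_mem_regions()/pci_release_mem_regions() are thin wrappers that
fold the pci_select_bars(pdev, IORESOURCE_MEM) step into a single call; from
include/linux/pci.h, approximately:

	static inline int pci_request_mem_regions(struct pci_dev *pdev,
						  const char *name)
	{
		return pci_request_selected_regions(pdev,
			    pci_select_bars(pdev, IORESOURCE_MEM), name);
	}

	static inline void pci_release_mem_regions(struct pci_dev *pdev)
	{
		return pci_release_selected_regions(pdev,
			    pci_select_bars(pdev, IORESOURCE_MEM));
	}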
@@ -2873,8 +2913,7 @@ static void igb_remove(struct pci_dev *pdev)
pci_iounmap(pdev, adapter->io_addr);
if (hw->flash_address)
iounmap(hw->flash_address);
- pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_release_mem_regions(pdev);
kfree(adapter->shadow_vfta);
free_netdev(netdev);
@@ -4061,7 +4100,7 @@ static int igb_vlan_promisc_enable(struct igb_adapter *adapter)
for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
u32 vlvf = rd32(E1000_VLVF(i));
- vlvf |= 1 << pf_id;
+ vlvf |= BIT(pf_id);
wr32(E1000_VLVF(i), vlvf);
}
@@ -4088,7 +4127,7 @@ static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset)
/* guarantee that we don't scrub out management VLAN */
vid = adapter->mng_vlan_id;
if (vid >= vid_start && vid < vid_end)
- vfta[(vid - vid_start) / 32] |= 1 << (vid % 32);
+ vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
if (!adapter->vfs_allocated_count)
goto set_vfta;
@@ -4107,7 +4146,7 @@ static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset)
if (vlvf & E1000_VLVF_VLANID_ENABLE) {
/* record VLAN ID in VFTA */
- vfta[(vid - vid_start) / 32] |= 1 << (vid % 32);
+ vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
/* if PF is part of this then continue */
if (test_bit(vid, adapter->active_vlans))
@@ -4115,7 +4154,7 @@ static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset)
}
/* remove PF from the pool */
- bits = ~(1 << pf_id);
+ bits = ~BIT(pf_id);
bits &= rd32(E1000_VLVF(i));
wr32(E1000_VLVF(i), bits);
}
@@ -4273,13 +4312,13 @@ static void igb_spoof_check(struct igb_adapter *adapter)
return;
for (j = 0; j < adapter->vfs_allocated_count; j++) {
- if (adapter->wvbr & (1 << j) ||
- adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
+ if (adapter->wvbr & BIT(j) ||
+ adapter->wvbr & BIT(j + IGB_STAGGERED_QUEUE_OFFSET)) {
dev_warn(&adapter->pdev->dev,
"Spoof event(s) detected on VF %d\n", j);
adapter->wvbr &=
- ~((1 << j) |
- (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
+ ~(BIT(j) |
+ BIT(j + IGB_STAGGERED_QUEUE_OFFSET));
}
}
}
@@ -4839,9 +4878,18 @@ static int igb_tso(struct igb_ring *tx_ring,
struct igb_tx_buffer *first,
u8 *hdr_len)
{
+ u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
struct sk_buff *skb = first->skb;
- u32 vlan_macip_lens, type_tucmd;
- u32 mss_l4len_idx, l4len;
+ union {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+ } ip;
+ union {
+ struct tcphdr *tcp;
+ unsigned char *hdr;
+ } l4;
+ u32 paylen, l4_offset;
int err;
if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -4854,45 +4902,52 @@ static int igb_tso(struct igb_ring *tx_ring,
if (err < 0)
return err;
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
+
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
- if (first->protocol == htons(ETH_P_IP)) {
- struct iphdr *iph = ip_hdr(skb);
- iph->tot_len = 0;
- iph->check = 0;
- tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, 0,
- IPPROTO_TCP,
- 0);
+ /* initialize outer IP header fields */
+ if (ip.v4->version == 4) {
+ /* IP header will have to cancel out any data that
+ * is not a part of the outer IP header
+ */
+ ip.v4->check = csum_fold(csum_add(lco_csum(skb),
+ csum_unfold(l4.tcp->check)));
type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
+
+ ip.v4->tot_len = 0;
first->tx_flags |= IGB_TX_FLAGS_TSO |
IGB_TX_FLAGS_CSUM |
IGB_TX_FLAGS_IPV4;
- } else if (skb_is_gso_v6(skb)) {
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ } else {
+ ip.v6->payload_len = 0;
first->tx_flags |= IGB_TX_FLAGS_TSO |
IGB_TX_FLAGS_CSUM;
}
- /* compute header lengths */
- l4len = tcp_hdrlen(skb);
- *hdr_len = skb_transport_offset(skb) + l4len;
+ /* determine offset of inner transport header */
+ l4_offset = l4.hdr - skb->data;
+
+ /* compute length of segmentation header */
+ *hdr_len = (l4.tcp->doff * 4) + l4_offset;
+
+ /* remove payload length from inner checksum */
+ paylen = skb->len - l4_offset;
+ csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
/* update gso size and bytecount with header size */
first->gso_segs = skb_shinfo(skb)->gso_segs;
first->bytecount += (first->gso_segs - 1) * *hdr_len;
/* MSS L4LEN IDX */
- mss_l4len_idx = l4len << E1000_ADVTXD_L4LEN_SHIFT;
+ mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT;
mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
/* VLAN MACLEN IPLEN */
- vlan_macip_lens = skb_network_header_len(skb);
- vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens = l4.hdr - ip.hdr;
+ vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;
igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
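The rewritten TSO path no longer rebuilds the TCP pseudo-header checksum
from the addresses; it edits the checksum the stack already stored. Removing
the payload length is a single ones'-complement adjustment -- roughly what
csum_replace_by_diff() expands to, assuming the net/checksum.h helpers:

	paylen = skb->len - l4_offset;
	/* fold the 32-bit length back out of the stored sum */
	l4.tcp->check = csum_fold(csum_add((__force __wsum)htonl(paylen),
					   ~csum_unfold(l4.tcp->check)));

The IPv4 branch likewise derives ip->check from lco_csum() (local checksum
offload) instead of recomputing it from scratch.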
@@ -5960,11 +6015,11 @@ static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
/* create mask for VF and other pools */
pool_mask = E1000_VLVF_POOLSEL_MASK;
- vlvf_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf);
+ vlvf_mask = BIT(E1000_VLVF_POOLSEL_SHIFT + vf);
/* drop PF from pool bits */
- pool_mask &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT +
- adapter->vfs_allocated_count));
+ pool_mask &= ~BIT(E1000_VLVF_POOLSEL_SHIFT +
+ adapter->vfs_allocated_count);
/* Find the vlan filter for this id */
for (i = E1000_VLVF_ARRAY_SIZE; i--;) {
@@ -5987,7 +6042,7 @@ static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
goto update_vlvf;
vid = vlvf & E1000_VLVF_VLANID_MASK;
- vfta_mask = 1 << (vid % 32);
+ vfta_mask = BIT(vid % 32);
/* clear bit from VFTA */
vfta = adapter->shadow_vfta[vid / 32];
@@ -6024,7 +6079,7 @@ static int igb_find_vlvf_entry(struct e1000_hw *hw, u32 vlan)
return idx;
}
-void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid)
+static void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid)
{
struct e1000_hw *hw = &adapter->hw;
u32 bits, pf_id;
@@ -6038,13 +6093,13 @@ void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid)
* entry other than the PF.
*/
pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
- bits = ~(1 << pf_id) & E1000_VLVF_POOLSEL_MASK;
+ bits = ~BIT(pf_id) & E1000_VLVF_POOLSEL_MASK;
bits &= rd32(E1000_VLVF(idx));
/* Disable the filter so this falls into the default pool. */
if (!bits) {
if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
- wr32(E1000_VLVF(idx), 1 << pf_id);
+ wr32(E1000_VLVF(idx), BIT(pf_id));
else
wr32(E1000_VLVF(idx), 0);
}
@@ -6228,9 +6283,9 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
/* enable transmit and receive for vf */
reg = rd32(E1000_VFTE);
- wr32(E1000_VFTE, reg | (1 << vf));
+ wr32(E1000_VFTE, reg | BIT(vf));
reg = rd32(E1000_VFRE);
- wr32(E1000_VFRE, reg | (1 << vf));
+ wr32(E1000_VFRE, reg | BIT(vf));
adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
@@ -6522,13 +6577,14 @@ static int igb_poll(struct napi_struct *napi, int budget)
igb_update_dca(q_vector);
#endif
if (q_vector->tx.ring)
- clean_complete = igb_clean_tx_irq(q_vector);
+ clean_complete = igb_clean_tx_irq(q_vector, budget);
if (q_vector->rx.ring) {
int cleaned = igb_clean_rx_irq(q_vector, budget);
work_done += cleaned;
- clean_complete &= (cleaned < budget);
+ if (cleaned >= budget)
+ clean_complete = false;
}
/* If all work not completed, return budget and keep polling */
@@ -6545,10 +6601,11 @@ static int igb_poll(struct napi_struct *napi, int budget)
/**
* igb_clean_tx_irq - Reclaim resources after transmit completes
* @q_vector: pointer to q_vector containing needed info
+ * @napi_budget: Used to determine if we are in netpoll
*
* returns true if ring is completely cleaned
**/
-static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
+static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
{
struct igb_adapter *adapter = q_vector->adapter;
struct igb_ring *tx_ring = q_vector->tx.ring;
@@ -6587,7 +6644,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
total_packets += tx_buffer->gso_segs;
/* free the skb */
- dev_consume_skb_any(tx_buffer->skb);
+ napi_consume_skb(tx_buffer->skb, napi_budget);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -6795,12 +6852,12 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
**/
static bool igb_add_rx_frag(struct igb_ring *rx_ring,
struct igb_rx_buffer *rx_buffer,
+ unsigned int size,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
struct page *page = rx_buffer->page;
unsigned char *va = page_address(page) + rx_buffer->page_offset;
- unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
unsigned int truesize = IGB_RX_BUFSZ;
#else
@@ -6852,6 +6909,7 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
{
+ unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
struct igb_rx_buffer *rx_buffer;
struct page *page;
@@ -6887,11 +6945,11 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
dma_sync_single_range_for_cpu(rx_ring->dev,
rx_buffer->dma,
rx_buffer->page_offset,
- IGB_RX_BUFSZ,
+ size,
DMA_FROM_DEVICE);
/* pull page into skb */
- if (igb_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+ if (igb_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) {
/* hand second half of page back to the ring */
igb_reuse_rx_page(rx_ring, rx_buffer);
} else {
@@ -7467,6 +7525,8 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
if (netif_running(netdev))
__igb_close(netdev, true);
+ igb_ptp_suspend(adapter);
+
igb_clear_interrupt_scheme(adapter);
#ifdef CONFIG_PM
@@ -7574,7 +7634,6 @@ static int igb_resume(struct device *dev)
if (igb_init_interrupt_scheme(adapter, true)) {
dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
- rtnl_unlock();
return -ENOMEM;
}
@@ -7845,11 +7904,13 @@ static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
struct e1000_hw *hw = &adapter->hw;
u32 rar_low, rar_high;
- /* HW expects these in little endian so we reverse the byte order
- * from network order (big endian) to CPU endian
+ /* HW expects these to be in network order when they are plugged
+ * into the registers, which are little endian. Do an leXX_to_cpup
+ * here so the values are ready for the byteswap that occurs with
+ * writel.
*/
- rar_low = le32_to_cpup((__be32 *)(addr));
- rar_high = le16_to_cpup((__be16 *)(addr + 4));
+ rar_low = le32_to_cpup((__le32 *)(addr));
+ rar_high = le16_to_cpup((__le16 *)(addr + 4));
/* Indicate to hardware the Address is Valid. */
rar_high |= E1000_RAH_AV;
@@ -7921,7 +7982,7 @@ static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
/* Calculate the rate factor values to set */
rf_int = link_speed / tx_rate;
rf_dec = (link_speed - (rf_int * tx_rate));
- rf_dec = (rf_dec * (1 << E1000_RTTBCNRC_RF_INT_SHIFT)) /
+ rf_dec = (rf_dec * BIT(E1000_RTTBCNRC_RF_INT_SHIFT)) /
tx_rate;
bcnrc_val = E1000_RTTBCNRC_RS_ENA;
@@ -8011,11 +8072,11 @@ static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC;
reg_val = rd32(reg_offset);
if (setting)
- reg_val |= ((1 << vf) |
- (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)));
+ reg_val |= (BIT(vf) |
+ BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
else
- reg_val &= ~((1 << vf) |
- (1 << (vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT)));
+ reg_val &= ~(BIT(vf) |
+ BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
wr32(reg_offset, reg_val);
adapter->vf_data[vf].spoofchk_enabled = setting;
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 22a8a29895b4..336c103ae374 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -69,9 +69,9 @@
#define IGB_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9)
#define IGB_PTP_TX_TIMEOUT (HZ * 15)
-#define INCPERIOD_82576 (1 << E1000_TIMINCA_16NS_SHIFT)
-#define INCVALUE_82576_MASK ((1 << E1000_TIMINCA_16NS_SHIFT) - 1)
-#define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT)
+#define INCPERIOD_82576 BIT(E1000_TIMINCA_16NS_SHIFT)
+#define INCVALUE_82576_MASK GENMASK(E1000_TIMINCA_16NS_SHIFT - 1, 0)
+#define INCVALUE_82576 (16u << IGB_82576_TSYNC_SHIFT)
#define IGB_NBITS_82580 40
static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
@@ -684,6 +684,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter)
u32 tsyncrxctl = rd32(E1000_TSYNCRXCTL);
unsigned long rx_event;
+ /* Other hardware uses per-packet timestamps */
if (hw->mac.type != e1000_82576)
return;
@@ -722,11 +723,30 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
struct skb_shared_hwtstamps shhwtstamps;
u64 regval;
+ int adjust = 0;
regval = rd32(E1000_TXSTMPL);
regval |= (u64)rd32(E1000_TXSTMPH) << 32;
igb_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
+ /* adjust timestamp for the TX latency based on link speed */
+ if (adapter->hw.mac.type == e1000_i210) {
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ adjust = IGB_I210_TX_LATENCY_10;
+ break;
+ case SPEED_100:
+ adjust = IGB_I210_TX_LATENCY_100;
+ break;
+ case SPEED_1000:
+ adjust = IGB_I210_TX_LATENCY_1000;
+ break;
+ }
+ }
+
+ shhwtstamps.hwtstamp =
+ ktime_add_ns(shhwtstamps.hwtstamp, adjust);
+
skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
dev_kfree_skb_any(adapter->ptp_tx_skb);
adapter->ptp_tx_skb = NULL;
@@ -748,13 +768,32 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
struct sk_buff *skb)
{
__le64 *regval = (__le64 *)va;
+ struct igb_adapter *adapter = q_vector->adapter;
+ int adjust = 0;
/* The timestamp is recorded in little endian format.
* DWORD: 0 1 2 3
* Field: Reserved Reserved SYSTIML SYSTIMH
*/
- igb_ptp_systim_to_hwtstamp(q_vector->adapter, skb_hwtstamps(skb),
+ igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb),
le64_to_cpu(regval[1]));
+
+ /* adjust timestamp for the RX latency based on link speed */
+ if (adapter->hw.mac.type == e1000_i210) {
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ adjust = IGB_I210_RX_LATENCY_10;
+ break;
+ case SPEED_100:
+ adjust = IGB_I210_RX_LATENCY_100;
+ break;
+ case SPEED_1000:
+ adjust = IGB_I210_RX_LATENCY_1000;
+ break;
+ }
+ }
+ skb_hwtstamps(skb)->hwtstamp =
+ ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
}
/**
@@ -771,6 +810,7 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
struct igb_adapter *adapter = q_vector->adapter;
struct e1000_hw *hw = &adapter->hw;
u64 regval;
+ int adjust = 0;
/* If this bit is set, then the RX registers contain the time stamp. No
* other packet will be time stamped until we read these registers, so
@@ -790,6 +830,23 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
+ /* adjust timestamp for the RX latency based on link speed */
+ if (adapter->hw.mac.type == e1000_i210) {
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ adjust = IGB_I210_RX_LATENCY_10;
+ break;
+ case SPEED_100:
+ adjust = IGB_I210_RX_LATENCY_100;
+ break;
+ case SPEED_1000:
+ adjust = IGB_I210_RX_LATENCY_1000;
+ break;
+ }
+ }
+ skb_hwtstamps(skb)->hwtstamp =
+ ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
+
/* Update the last_rx_timestamp timer in order to enable watchdog check
* for error case of latched timestamp on a dropped packet.
*/
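The same i210 link-speed switch now appears at three timestamp sites (Tx
hwtstamp, per-packet Rx, registered Rx). A hypothetical consolidation -- not
part of this patch -- using the IGB_I210_*_LATENCY constants added to igb.h:

	static int igb_ptp_link_latency(struct igb_adapter *adapter, bool tx)
	{
		if (adapter->hw.mac.type != e1000_i210)
			return 0;

		switch (adapter->link_speed) {
		case SPEED_10:
			return tx ? IGB_I210_TX_LATENCY_10 :
				    IGB_I210_RX_LATENCY_10;
		case SPEED_100:
			return tx ? IGB_I210_TX_LATENCY_100 :
				    IGB_I210_RX_LATENCY_100;
		case SPEED_1000:
			return tx ? IGB_I210_TX_LATENCY_1000 :
				    IGB_I210_RX_LATENCY_1000;
		}
		return 0;
	}

Tx sites would ktime_add_ns() the value and Rx sites ktime_sub_ns() it, as
the three open-coded switches do above.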
@@ -1006,6 +1063,13 @@ int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
-EFAULT : 0;
}
+/**
+ * igb_ptp_init - Initialize PTP functionality
+ * @adapter: Board private structure
+ *
+ * This function is called at device probe to initialize the PTP
+ * functionality.
+ */
void igb_ptp_init(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
@@ -1028,8 +1092,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->cc.mask = CYCLECOUNTER_MASK(64);
adapter->cc.mult = 1;
adapter->cc.shift = IGB_82576_TSYNC_SHIFT;
- /* Dial the nominal frequency. */
- wr32(E1000_TIMINCA, INCPERIOD_82576 | INCVALUE_82576);
+ adapter->ptp_flags |= IGB_PTP_OVERFLOW_CHECK;
break;
case e1000_82580:
case e1000_i354:
@@ -1048,8 +1111,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->cc.mask = CYCLECOUNTER_MASK(IGB_NBITS_82580);
adapter->cc.mult = 1;
adapter->cc.shift = 0;
- /* Enable the timer functions by clearing bit 31. */
- wr32(E1000_TSAUXC, 0x0);
+ adapter->ptp_flags |= IGB_PTP_OVERFLOW_CHECK;
break;
case e1000_i210:
case e1000_i211:
@@ -1074,44 +1136,24 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.settime64 = igb_ptp_settime_i210;
adapter->ptp_caps.enable = igb_ptp_feature_enable_i210;
adapter->ptp_caps.verify = igb_ptp_verify_pin;
- /* Enable the timer functions by clearing bit 31. */
- wr32(E1000_TSAUXC, 0x0);
break;
default:
adapter->ptp_clock = NULL;
return;
}
- wrfl();
-
spin_lock_init(&adapter->tmreg_lock);
INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);
- /* Initialize the clock and overflow work for devices that need it. */
- if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
- struct timespec64 ts = ktime_to_timespec64(ktime_get_real());
-
- igb_ptp_settime_i210(&adapter->ptp_caps, &ts);
- } else {
- timecounter_init(&adapter->tc, &adapter->cc,
- ktime_to_ns(ktime_get_real()));
-
+ if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK)
INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
igb_ptp_overflow_check);
- schedule_delayed_work(&adapter->ptp_overflow_work,
- IGB_SYSTIM_OVERFLOW_PERIOD);
- }
-
- /* Initialize the time sync interrupts for devices that support it. */
- if (hw->mac.type >= e1000_82580) {
- wr32(E1000_TSIM, TSYNC_INTERRUPTS);
- wr32(E1000_IMS, E1000_IMS_TS);
- }
-
adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+ igb_ptp_reset(adapter);
+
adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
&adapter->pdev->dev);
if (IS_ERR(adapter->ptp_clock)) {
@@ -1120,32 +1162,24 @@ void igb_ptp_init(struct igb_adapter *adapter)
} else {
dev_info(&adapter->pdev->dev, "added PHC on %s\n",
adapter->netdev->name);
- adapter->flags |= IGB_FLAG_PTP;
+ adapter->ptp_flags |= IGB_PTP_ENABLED;
}
}
/**
- * igb_ptp_stop - Disable PTP device and stop the overflow check.
- * @adapter: Board private structure.
+ * igb_ptp_suspend - Disable PTP work items and prepare for suspend
+ * @adapter: Board private structure
*
- * This function stops the PTP support and cancels the delayed work.
- **/
-void igb_ptp_stop(struct igb_adapter *adapter)
+ * This function stops the overflow check work and the PTP Tx timestamp work,
+ * and prepares the device for OS suspend.
+ */
+void igb_ptp_suspend(struct igb_adapter *adapter)
{
- switch (adapter->hw.mac.type) {
- case e1000_82576:
- case e1000_82580:
- case e1000_i354:
- case e1000_i350:
- cancel_delayed_work_sync(&adapter->ptp_overflow_work);
- break;
- case e1000_i210:
- case e1000_i211:
- /* No delayed work to cancel. */
- break;
- default:
+ if (!(adapter->ptp_flags & IGB_PTP_ENABLED))
return;
- }
+
+ if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK)
+ cancel_delayed_work_sync(&adapter->ptp_overflow_work);
cancel_work_sync(&adapter->ptp_tx_work);
if (adapter->ptp_tx_skb) {
@@ -1153,12 +1187,23 @@ void igb_ptp_stop(struct igb_adapter *adapter)
adapter->ptp_tx_skb = NULL;
clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
}
+}
+
+/**
+ * igb_ptp_stop - Disable PTP device and stop the overflow check.
+ * @adapter: Board private structure.
+ *
+ * This function stops the PTP support and cancels the delayed work.
+ **/
+void igb_ptp_stop(struct igb_adapter *adapter)
+{
+ igb_ptp_suspend(adapter);
if (adapter->ptp_clock) {
ptp_clock_unregister(adapter->ptp_clock);
dev_info(&adapter->pdev->dev, "removed PHC on %s\n",
adapter->netdev->name);
- adapter->flags &= ~IGB_FLAG_PTP;
+ adapter->ptp_flags &= ~IGB_PTP_ENABLED;
}
}
@@ -1173,9 +1218,6 @@ void igb_ptp_reset(struct igb_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
unsigned long flags;
- if (!(adapter->flags & IGB_FLAG_PTP))
- return;
-
/* reset the tstamp_config */
igb_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
@@ -1212,4 +1254,10 @@ void igb_ptp_reset(struct igb_adapter *adapter)
}
out:
spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+
+ wrfl();
+
+ if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK)
+ schedule_delayed_work(&adapter->ptp_overflow_work,
+ IGB_SYSTIM_OVERFLOW_PERIOD);
}
diff --git a/drivers/net/ethernet/intel/igbvf/defines.h b/drivers/net/ethernet/intel/igbvf/defines.h
index ae3f28332fa0..ee1ef08d7fc4 100644
--- a/drivers/net/ethernet/intel/igbvf/defines.h
+++ b/drivers/net/ethernet/intel/igbvf/defines.h
@@ -113,7 +113,7 @@
#define E1000_RXDCTL_QUEUE_ENABLE 0x02000000 /* Enable specific Rx Queue */
/* Direct Cache Access (DCA) definitions */
-#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define E1000_DCA_TXCTRL_TX_WB_RO_EN BIT(11) /* Tx Desc writeback RO bit */
#define E1000_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index b74ce53d7b52..8dea1b1367ef 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -154,7 +154,8 @@ static void igbvf_get_regs(struct net_device *netdev,
memset(p, 0, IGBVF_REGS_LEN * sizeof(u32));
- regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
+ regs->version = (1u << 24) |
+ (adapter->pdev->revision << 16) |
adapter->pdev->device;
regs_buff[0] = er32(CTRL);
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index f166baab8d7e..6f4290d6dc9f 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -287,8 +287,8 @@ struct igbvf_info {
};
/* hardware capability, feature, and workaround flags */
-#define IGBVF_FLAG_RX_CSUM_DISABLED (1 << 0)
-#define IGBVF_FLAG_RX_LB_VLAN_BSWAP (1 << 1)
+#define IGBVF_FLAG_RX_CSUM_DISABLED BIT(0)
+#define IGBVF_FLAG_RX_LB_VLAN_BSWAP BIT(1)
#define IGBVF_RX_DESC_ADV(R, i) \
(&((((R).desc))[i].rx_desc))
#define IGBVF_TX_DESC_ADV(R, i) \
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index c12442252adb..b0778ba65083 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -964,7 +964,7 @@ static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
ivar = ivar & 0xFFFFFF00;
ivar |= msix_vector | E1000_IVAR_VALID;
}
- adapter->rx_ring[rx_queue].eims_value = 1 << msix_vector;
+ adapter->rx_ring[rx_queue].eims_value = BIT(msix_vector);
array_ew32(IVAR0, index, ivar);
}
if (tx_queue > IGBVF_NO_QUEUE) {
@@ -979,7 +979,7 @@ static void igbvf_assign_vector(struct igbvf_adapter *adapter, int rx_queue,
ivar = ivar & 0xFFFF00FF;
ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
}
- adapter->tx_ring[tx_queue].eims_value = 1 << msix_vector;
+ adapter->tx_ring[tx_queue].eims_value = BIT(msix_vector);
array_ew32(IVAR0, index, ivar);
}
}
@@ -1014,8 +1014,8 @@ static void igbvf_configure_msix(struct igbvf_adapter *adapter)
ew32(IVAR_MISC, tmp);
- adapter->eims_enable_mask = (1 << (vector)) - 1;
- adapter->eims_other = 1 << (vector - 1);
+ adapter->eims_enable_mask = GENMASK(vector - 1, 0);
+ adapter->eims_other = BIT(vector - 1);
e1e_flush();
}
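GENMASK(h, l) builds a contiguous mask covering bits l through h, so
GENMASK(vector - 1, 0) is the well-defined spelling of (1 << vector) - 1.
Approximately, from include/linux/bitops.h of this era:

	#define GENMASK(h, l) \
		(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

	adapter->eims_enable_mask = GENMASK(vector - 1, 0); /* 0x7 for 3 vectors */
	adapter->eims_other = BIT(vector - 1);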
@@ -1367,7 +1367,7 @@ static void igbvf_configure_rx(struct igbvf_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
struct igbvf_ring *rx_ring = adapter->rx_ring;
u64 rdba;
- u32 rdlen, rxdctl;
+ u32 rxdctl;
/* disable receives */
rxdctl = er32(RXDCTL(0));
@@ -1375,8 +1375,6 @@ static void igbvf_configure_rx(struct igbvf_adapter *adapter)
e1e_flush();
msleep(10);
- rdlen = rx_ring->count * sizeof(union e1000_adv_rx_desc);
-
/* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring
*/
@@ -1933,83 +1931,74 @@ static void igbvf_tx_ctxtdesc(struct igbvf_ring *tx_ring, u32 vlan_macip_lens,
buffer_info->dma = 0;
}
-static int igbvf_tso(struct igbvf_adapter *adapter,
- struct igbvf_ring *tx_ring,
- struct sk_buff *skb, u32 tx_flags, u8 *hdr_len,
- __be16 protocol)
-{
- struct e1000_adv_tx_context_desc *context_desc;
- struct igbvf_buffer *buffer_info;
- u32 info = 0, tu_cmd = 0;
- u32 mss_l4len_idx, l4len;
- unsigned int i;
+static int igbvf_tso(struct igbvf_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+{
+ u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
+ union {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+ } ip;
+ union {
+ struct tcphdr *tcp;
+ unsigned char *hdr;
+ } l4;
+ u32 paylen, l4_offset;
int err;
- *hdr_len = 0;
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (!skb_is_gso(skb))
+ return 0;
err = skb_cow_head(skb, 0);
- if (err < 0) {
- dev_err(&adapter->pdev->dev, "igbvf_tso returning an error\n");
+ if (err < 0)
return err;
- }
- l4len = tcp_hdrlen(skb);
- *hdr_len += l4len;
-
- if (protocol == htons(ETH_P_IP)) {
- struct iphdr *iph = ip_hdr(skb);
-
- iph->tot_len = 0;
- iph->check = 0;
- tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, 0,
- IPPROTO_TCP,
- 0);
- } else if (skb_is_gso_v6(skb)) {
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
- }
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
- i = tx_ring->next_to_use;
+ /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+ type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
- buffer_info = &tx_ring->buffer_info[i];
- context_desc = IGBVF_TX_CTXTDESC_ADV(*tx_ring, i);
- /* VLAN MACLEN IPLEN */
- if (tx_flags & IGBVF_TX_FLAGS_VLAN)
- info |= (tx_flags & IGBVF_TX_FLAGS_VLAN_MASK);
- info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
- *hdr_len += skb_network_offset(skb);
- info |= (skb_transport_header(skb) - skb_network_header(skb));
- *hdr_len += (skb_transport_header(skb) - skb_network_header(skb));
- context_desc->vlan_macip_lens = cpu_to_le32(info);
+ /* initialize outer IP header fields */
+ if (ip.v4->version == 4) {
+ /* IP header will have to cancel out any data that
+ * is not a part of the outer IP header
+ */
+ ip.v4->check = csum_fold(csum_add(lco_csum(skb),
+ csum_unfold(l4.tcp->check)));
+ type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;
- /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
- tu_cmd |= (E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT);
+ ip.v4->tot_len = 0;
+ } else {
+ ip.v6->payload_len = 0;
+ }
- if (protocol == htons(ETH_P_IP))
- tu_cmd |= E1000_ADVTXD_TUCMD_IPV4;
- tu_cmd |= E1000_ADVTXD_TUCMD_L4T_TCP;
+ /* determine offset of inner transport header */
+ l4_offset = l4.hdr - skb->data;
- context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
+ /* compute length of segmentation header */
+ *hdr_len = (l4.tcp->doff * 4) + l4_offset;
- /* MSS L4LEN IDX */
- mss_l4len_idx = (skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT);
- mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
+ /* remove payload length from inner checksum */
+ paylen = skb->len - l4_offset;
+ csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
- context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
- context_desc->seqnum_seed = 0;
+ /* MSS L4LEN IDX */
+ mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT;
+ mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;
- buffer_info->time_stamp = jiffies;
- buffer_info->dma = 0;
- i++;
- if (i == tx_ring->count)
- i = 0;
+ /* VLAN MACLEN IPLEN */
+ vlan_macip_lens = l4.hdr - ip.hdr;
+ vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens |= tx_flags & IGBVF_TX_FLAGS_VLAN_MASK;
- tx_ring->next_to_use = i;
+ igbvf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);
- return true;
+ return 1;
}
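
The rewritten TSO path no longer recomputes the pseudo-header checksum from the addresses; it strips the payload length out of the existing sum via csum_replace_by_diff() above, then zeroes tot_len/payload_len so the hardware can insert per-segment lengths. A minimal userspace sketch of the one's-complement arithmetic behind that, with an illustrative stand-in for the address/protocol contribution:

    /* Sketch of removing a value from an Internet (one's-complement)
     * checksum, the operation csum_replace_by_diff() performs above.
     * addr_sum is a hypothetical stand-in for the pseudo-header's
     * address and protocol contribution. */
    #include <stdint.h>
    #include <stdio.h>

    /* Fold a 32-bit accumulator down to 16 bits with end-around carry. */
    static uint16_t csum_fold32(uint32_t sum)
    {
            while (sum >> 16)
                    sum = (sum & 0xffff) + (sum >> 16);
            return (uint16_t)sum;
    }

    int main(void)
    {
            uint32_t addr_sum = 0x1234;             /* hypothetical      */
            uint16_t paylen = 1448;                 /* TCP payload bytes */
            uint32_t with_len = addr_sum + paylen;  /* sum incl. length  */

            /* Subtracting in one's complement == adding the complement. */
            uint32_t without = with_len + (uint16_t)~paylen;

            printf("with=%04x without=%04x target=%04x\n",
                   csum_fold32(with_len), csum_fold32(without),
                   csum_fold32(addr_sum));
            return 0;
    }

Folding with_len gives 0x17dc; adding ~0x05a8 and folding lands back on 0x1234, i.e. the sum with the length withdrawn.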
static inline bool igbvf_ipv6_csum_is_sctp(struct sk_buff *skb)
@@ -2091,7 +2080,7 @@ static int igbvf_maybe_stop_tx(struct net_device *netdev, int size)
}
#define IGBVF_MAX_TXD_PWR 16
-#define IGBVF_MAX_DATA_PER_TXD (1 << IGBVF_MAX_TXD_PWR)
+#define IGBVF_MAX_DATA_PER_TXD (1u << IGBVF_MAX_TXD_PWR)
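
The 1u above keeps the size constant unsigned; the two descriptor words assembled at the end of igbvf_tso() are built the same shift-and-or way. A sketch in isolation — the shift values model the e1000 advanced context descriptor layout (IPLEN in bits 8:0, MACLEN in 15:9, L4LEN in 15:8, MSS in 31:16) and are assumptions here, not quoted from this patch:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed shifts mirroring the e1000 advanced context descriptor. */
    #define MACLEN_SHIFT 9
    #define L4LEN_SHIFT  8
    #define MSS_SHIFT    16

    int main(void)
    {
            uint32_t iplen = 20, maclen = 14;   /* IPv4 over Ethernet    */
            uint32_t l4len = 20, mss = 1448;    /* base TCP, typical MSS */

            uint32_t vlan_macip_lens = iplen | (maclen << MACLEN_SHIFT);
            uint32_t mss_l4len_idx = (l4len << L4LEN_SHIFT) |
                                     (mss << MSS_SHIFT);

            printf("vlan_macip_lens=%08x mss_l4len_idx=%08x\n",
                   (unsigned)vlan_macip_lens, (unsigned)mss_l4len_idx);
            return 0;
    }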
static inline int igbvf_tx_map_adv(struct igbvf_adapter *adapter,
struct igbvf_ring *tx_ring,
@@ -2271,8 +2260,7 @@ static netdev_tx_t igbvf_xmit_frame_ring_adv(struct sk_buff *skb,
first = tx_ring->next_to_use;
- tso = skb_is_gso(skb) ?
- igbvf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len, protocol) : 0;
+ tso = igbvf_tso(tx_ring, skb, tx_flags, &hdr_len);
if (unlikely(tso < 0)) {
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
@@ -2615,6 +2603,40 @@ static int igbvf_set_features(struct net_device *netdev,
return 0;
}
+#define IGBVF_MAX_MAC_HDR_LEN 127
+#define IGBVF_MAX_NETWORK_HDR_LEN 511
+
+static netdev_features_t
+igbvf_features_check(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features)
+{
+ unsigned int network_hdr_len, mac_hdr_len;
+
+ /* Make certain the headers can be described by a context descriptor */
+ mac_hdr_len = skb_network_header(skb) - skb->data;
+ if (unlikely(mac_hdr_len > IGBVF_MAX_MAC_HDR_LEN))
+ return features & ~(NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+
+ network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
+ if (unlikely(network_hdr_len > IGBVF_MAX_NETWORK_HDR_LEN))
+ return features & ~(NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+
+ /* We can only support IPV4 TSO in tunnels if we can mangle the
+ * inner IP ID field, so strip TSO if MANGLEID is not supported.
+ */
+ if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
+ features &= ~NETIF_F_TSO;
+
+ return features;
+}
+
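
The two limits above exist because MACLEN and IPLEN are narrow context-descriptor fields; a self-contained sketch of how the checked lengths fall out of skb-style pointers, using an illustrative untagged IPv4/TCP frame:

    /* Sketch: the header lengths tested in igbvf_features_check(),
     * modelled with plain pointers into a raw frame buffer. */
    #include <stddef.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned char frame[128] = {0};
            unsigned char *data = frame;            /* skb->data             */
            unsigned char *network = frame + 14;    /* skb_network_header()  */
            unsigned char *csum_start = frame + 34; /* skb_checksum_start()  */

            size_t mac_hdr_len = network - data;           /* 14: Ethernet   */
            size_t network_hdr_len = csum_start - network; /* 20: base IPv4  */

            /* Mirrors the bounds tested above. */
            printf("mac=%zu (max 127), net=%zu (max 511)\n",
                   mac_hdr_len, network_hdr_len);
            return 0;
    }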
static const struct net_device_ops igbvf_netdev_ops = {
.ndo_open = igbvf_open,
.ndo_stop = igbvf_close,
@@ -2631,7 +2653,7 @@ static const struct net_device_ops igbvf_netdev_ops = {
.ndo_poll_controller = igbvf_netpoll,
#endif
.ndo_set_features = igbvf_set_features,
- .ndo_features_check = passthru_features_check,
+ .ndo_features_check = igbvf_features_check,
};
/**
@@ -2739,22 +2761,30 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_HW_CSUM |
NETIF_F_SCTP_CRC;
- netdev->features = netdev->hw_features |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER;
+#define IGBVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+ NETIF_F_GSO_GRE_CSUM | \
+ NETIF_F_GSO_IPXIP4 | \
+ NETIF_F_GSO_IPXIP6 | \
+ NETIF_F_GSO_UDP_TUNNEL | \
+ NETIF_F_GSO_UDP_TUNNEL_CSUM)
+
+ netdev->gso_partial_features = IGBVF_GSO_PARTIAL_FEATURES;
+ netdev->hw_features |= NETIF_F_GSO_PARTIAL |
+ IGBVF_GSO_PARTIAL_FEATURES;
+
+ netdev->features = netdev->hw_features;
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
- netdev->vlan_features |= NETIF_F_SG |
- NETIF_F_TSO |
- NETIF_F_TSO6 |
- NETIF_F_HW_CSUM |
- NETIF_F_SCTP_CRC;
-
+ netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
netdev->mpls_features |= NETIF_F_HW_CSUM;
- netdev->hw_enc_features |= NETIF_F_HW_CSUM;
+ netdev->hw_enc_features |= netdev->vlan_features;
+
+ /* set this bit last since it cannot be part of vlan_features */
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX;
/*reset the controller to put the device in a known good state */
err = hw->mac.ops.reset_hw(hw);
diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c
index a13baa90ae20..335ba6642145 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.c
+++ b/drivers/net/ethernet/intel/igbvf/vf.c
@@ -266,7 +266,7 @@ static s32 e1000_set_vfta_vf(struct e1000_hw *hw, u16 vid, bool set)
msgbuf[1] = vid;
/* Setting the 8 bit field MSG INFO to true indicates "add" */
if (set)
- msgbuf[0] |= 1 << E1000_VT_MSGINFO_SHIFT;
+ msgbuf[0] |= BIT(E1000_VT_MSGINFO_SHIFT);
mbx->ops.write_posted(hw, msgbuf, 2);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index e4949af7dd6b..9475ff9055aa 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -143,14 +143,11 @@ struct vf_data_storage {
unsigned char vf_mac_addresses[ETH_ALEN];
u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
u16 num_vf_mc_hashes;
- u16 default_vf_vlan_id;
- u16 vlans_enabled;
bool clear_to_send;
bool pf_set_mac;
u16 pf_vlan; /* When set, guest VLAN config not allowed. */
u16 pf_qos;
u16 tx_rate;
- u16 vlan_count;
u8 spoofchk_enabled;
bool rss_query_enabled;
u8 trusted;
@@ -173,7 +170,7 @@ struct vf_macvlans {
};
#define IXGBE_MAX_TXD_PWR 14
-#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
+#define IXGBE_MAX_DATA_PER_TXD (1u << IXGBE_MAX_TXD_PWR)
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
@@ -456,7 +453,7 @@ static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
IXGBE_QV_STATE_POLL);
#ifdef BP_EXTENDED_STATS
if (rc != IXGBE_QV_STATE_IDLE)
- q_vector->tx.ring->stats.yields++;
+ q_vector->rx.ring->stats.yields++;
#endif
return rc == IXGBE_QV_STATE_IDLE;
}
@@ -623,44 +620,45 @@ struct ixgbe_adapter {
* thus the additional *_CAPABLE flags.
*/
u32 flags;
-#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 1)
-#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 3)
-#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 4)
-#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 5)
-#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 6)
-#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 8)
-#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 9)
-#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 10)
-#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 11)
-#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 12)
-#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 13)
-#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 14)
-#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 15)
-#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 16)
-#define IXGBE_FLAG_NEED_LINK_CONFIG (u32)(1 << 17)
-#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 18)
-#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 19)
-#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 20)
-#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 21)
-#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 22)
-#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 23)
+#define IXGBE_FLAG_MSI_ENABLED BIT(1)
+#define IXGBE_FLAG_MSIX_ENABLED BIT(3)
+#define IXGBE_FLAG_RX_1BUF_CAPABLE BIT(4)
+#define IXGBE_FLAG_RX_PS_CAPABLE BIT(5)
+#define IXGBE_FLAG_RX_PS_ENABLED BIT(6)
+#define IXGBE_FLAG_DCA_ENABLED BIT(8)
+#define IXGBE_FLAG_DCA_CAPABLE BIT(9)
+#define IXGBE_FLAG_IMIR_ENABLED BIT(10)
+#define IXGBE_FLAG_MQ_CAPABLE BIT(11)
+#define IXGBE_FLAG_DCB_ENABLED BIT(12)
+#define IXGBE_FLAG_VMDQ_CAPABLE BIT(13)
+#define IXGBE_FLAG_VMDQ_ENABLED BIT(14)
+#define IXGBE_FLAG_FAN_FAIL_CAPABLE BIT(15)
+#define IXGBE_FLAG_NEED_LINK_UPDATE BIT(16)
+#define IXGBE_FLAG_NEED_LINK_CONFIG BIT(17)
+#define IXGBE_FLAG_FDIR_HASH_CAPABLE BIT(18)
+#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE BIT(19)
+#define IXGBE_FLAG_FCOE_CAPABLE BIT(20)
+#define IXGBE_FLAG_FCOE_ENABLED BIT(21)
+#define IXGBE_FLAG_SRIOV_CAPABLE BIT(22)
+#define IXGBE_FLAG_SRIOV_ENABLED BIT(23)
#define IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE BIT(24)
#define IXGBE_FLAG_RX_HWTSTAMP_ENABLED BIT(25)
#define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER BIT(26)
+#define IXGBE_FLAG_DCB_CAPABLE BIT(27)
u32 flags2;
-#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1 << 0)
-#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1)
-#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 2)
-#define IXGBE_FLAG2_TEMP_SENSOR_EVENT (u32)(1 << 3)
-#define IXGBE_FLAG2_SEARCH_FOR_SFP (u32)(1 << 4)
-#define IXGBE_FLAG2_SFP_NEEDS_RESET (u32)(1 << 5)
-#define IXGBE_FLAG2_RESET_REQUESTED (u32)(1 << 6)
-#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7)
-#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8)
-#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9)
-#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10)
-#define IXGBE_FLAG2_PHY_INTERRUPT (u32)(1 << 11)
+#define IXGBE_FLAG2_RSC_CAPABLE BIT(0)
+#define IXGBE_FLAG2_RSC_ENABLED BIT(1)
+#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE BIT(2)
+#define IXGBE_FLAG2_TEMP_SENSOR_EVENT BIT(3)
+#define IXGBE_FLAG2_SEARCH_FOR_SFP BIT(4)
+#define IXGBE_FLAG2_SFP_NEEDS_RESET BIT(5)
+#define IXGBE_FLAG2_RESET_REQUESTED BIT(6)
+#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT BIT(7)
+#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP BIT(8)
+#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP BIT(9)
+#define IXGBE_FLAG2_PTP_PPS_ENABLED BIT(10)
+#define IXGBE_FLAG2_PHY_INTERRUPT BIT(11)
#define IXGBE_FLAG2_VXLAN_REREG_NEEDED BIT(12)
#define IXGBE_FLAG2_VLAN_PROMISC BIT(13)
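
The mechanical (1 << n) → BIT(n) conversion across these flag tables, like the 1u in IGBVF_MAX_DATA_PER_TXD earlier, is about doing the shift in an unsigned type: BIT() expands to (1UL << (n)) in the kernel, so bit 31 never shifts into a signed int's sign bit. A compact sketch:

    /* Sketch: why BIT(n) beats a plain (1 << n) for flag words. */
    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n)     (1UL << (n))
    #define BIT_ULL(n) (1ULL << (n))

    int main(void)
    {
            uint32_t flags = 0;

            flags |= BIT(27);             /* e.g. a _CAPABLE style flag   */
            flags |= BIT(31);             /* fine: shift done in unsigned */
            /* flags |= 1 << 31;  -- would be UB: 1 is a signed int       */

            uint64_t qmask = BIT_ULL(47); /* queue masks can pass bit 31  */

            printf("flags=%08x qmask=%016llx\n", (unsigned)flags,
                   (unsigned long long)qmask);
            return 0;
    }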
@@ -795,7 +793,7 @@ struct ixgbe_adapter {
unsigned long fwd_bitmask; /* Bitmask indicating in use pools */
#define IXGBE_MAX_LINK_HANDLE 10
- struct ixgbe_mat_field *jump_tables[IXGBE_MAX_LINK_HANDLE];
+ struct ixgbe_jump_table *jump_tables[IXGBE_MAX_LINK_HANDLE];
unsigned long tables;
/* maximum number of RETA entries among all devices supported by ixgbe
@@ -817,6 +815,7 @@ static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
return IXGBE_MAX_RSS_INDICES;
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
return IXGBE_MAX_RSS_INDICES_X550;
default:
return 0;
@@ -827,7 +826,7 @@ struct ixgbe_fdir_filter {
struct hlist_node fdir_node;
union ixgbe_atr_input filter;
u16 sw_idx;
- u16 action;
+ u64 action;
};
enum ixgbe_state_t {
@@ -860,13 +859,15 @@ enum ixgbe_boards {
board_X540,
board_X550,
board_X550EM_x,
+ board_x550em_a,
};
-extern struct ixgbe_info ixgbe_82598_info;
-extern struct ixgbe_info ixgbe_82599_info;
-extern struct ixgbe_info ixgbe_X540_info;
-extern struct ixgbe_info ixgbe_X550_info;
-extern struct ixgbe_info ixgbe_X550EM_x_info;
+extern const struct ixgbe_info ixgbe_82598_info;
+extern const struct ixgbe_info ixgbe_82599_info;
+extern const struct ixgbe_info ixgbe_X540_info;
+extern const struct ixgbe_info ixgbe_X550_info;
+extern const struct ixgbe_info ixgbe_X550EM_x_info;
+extern const struct ixgbe_info ixgbe_x550em_a_info;
#ifdef CONFIG_IXGBE_DCB
extern const struct dcbnl_rtnl_ops dcbnl_ops;
#endif
@@ -893,8 +894,8 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_ring *);
void ixgbe_update_stats(struct ixgbe_adapter *adapter);
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
-int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
- u16 subdevice_id);
+bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
+ u16 subdevice_id);
#ifdef CONFIG_PCI_IOV
void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
#endif
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index d8a9fb8a59e2..fb51be74dd4c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2015 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -792,7 +792,7 @@ mac_reset_top:
}
gheccr = IXGBE_READ_REG(hw, IXGBE_GHECCR);
- gheccr &= ~((1 << 21) | (1 << 18) | (1 << 9) | (1 << 6));
+ gheccr &= ~(BIT(21) | BIT(18) | BIT(9) | BIT(6));
IXGBE_WRITE_REG(hw, IXGBE_GHECCR, gheccr);
/*
@@ -914,10 +914,10 @@ static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
if (vlan_on)
/* Turn on this VLAN id */
- bits |= (1 << bitindex);
+ bits |= BIT(bitindex);
else
/* Turn off this VLAN id */
- bits &= ~(1 << bitindex);
+ bits &= ~BIT(bitindex);
IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
return 0;
@@ -1160,7 +1160,7 @@ static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
}
-static struct ixgbe_mac_operations mac_ops_82598 = {
+static const struct ixgbe_mac_operations mac_ops_82598 = {
.init_hw = &ixgbe_init_hw_generic,
.reset_hw = &ixgbe_reset_hw_82598,
.start_hw = &ixgbe_start_hw_82598,
@@ -1192,9 +1192,11 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
.clear_vfta = &ixgbe_clear_vfta_82598,
.set_vfta = &ixgbe_set_vfta_82598,
.fc_enable = &ixgbe_fc_enable_82598,
+ .setup_fc = ixgbe_setup_fc_generic,
.set_fw_drv_ver = NULL,
.acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
.release_swfw_sync = &ixgbe_release_swfw_sync,
+ .init_swfw_sync = NULL,
.get_thermal_sensor_data = NULL,
.init_thermal_sensor_thresh = NULL,
.prot_autoc_read = &prot_autoc_read_generic,
@@ -1203,7 +1205,7 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
.disable_rx = &ixgbe_disable_rx_generic,
};
-static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
+static const struct ixgbe_eeprom_operations eeprom_ops_82598 = {
.init_params = &ixgbe_init_eeprom_params_generic,
.read = &ixgbe_read_eerd_generic,
.write = &ixgbe_write_eeprom_generic,
@@ -1214,7 +1216,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
.update_checksum = &ixgbe_update_eeprom_checksum_generic,
};
-static struct ixgbe_phy_operations phy_ops_82598 = {
+static const struct ixgbe_phy_operations phy_ops_82598 = {
.identify = &ixgbe_identify_phy_generic,
.identify_sfp = &ixgbe_identify_module_generic,
.init = &ixgbe_init_phy_ops_82598,
@@ -1230,7 +1232,7 @@ static struct ixgbe_phy_operations phy_ops_82598 = {
.check_overtemp = &ixgbe_tn_check_overtemp,
};
-struct ixgbe_info ixgbe_82598_info = {
+const struct ixgbe_info ixgbe_82598_info = {
.mac = ixgbe_mac_82598EB,
.get_invariants = &ixgbe_get_invariants_82598,
.mac_ops = &mac_ops_82598,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index fa8d4f40ac2a..63b25006ac90 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2015 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -1296,17 +1296,17 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
do { \
u32 n = (_n); \
- if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
+ if (IXGBE_ATR_COMMON_HASH_KEY & BIT(n)) \
common_hash ^= lo_hash_dword >> n; \
- else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+ else if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n)) \
bucket_hash ^= lo_hash_dword >> n; \
- else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
+ else if (IXGBE_ATR_SIGNATURE_HASH_KEY & BIT(n)) \
sig_hash ^= lo_hash_dword << (16 - n); \
- if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
+ if (IXGBE_ATR_COMMON_HASH_KEY & BIT(n + 16)) \
common_hash ^= hi_hash_dword >> n; \
- else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+ else if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n + 16)) \
bucket_hash ^= hi_hash_dword >> n; \
- else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
+ else if (IXGBE_ATR_SIGNATURE_HASH_KEY & BIT(n + 16)) \
sig_hash ^= hi_hash_dword << (16 - n); \
} while (0)
@@ -1440,9 +1440,9 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
#define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \
do { \
u32 n = (_n); \
- if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
+ if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n)) \
bucket_hash ^= lo_hash_dword >> n; \
- if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
+ if (IXGBE_ATR_BUCKET_HASH_KEY & BIT(n + 16)) \
bucket_hash ^= hi_hash_dword >> n; \
} while (0)
@@ -1633,6 +1633,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
switch (hw->mac.type) {
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm);
break;
default:
@@ -1812,9 +1813,6 @@ static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
/* We need to run link autotry after the driver loads */
hw->mac.autotry_restart = true;
- if (ret_val)
- return ret_val;
-
return ixgbe_verify_fw_version_82599(hw);
}
@@ -2181,7 +2179,7 @@ release_i2c_access:
return status;
}
-static struct ixgbe_mac_operations mac_ops_82599 = {
+static const struct ixgbe_mac_operations mac_ops_82599 = {
.init_hw = &ixgbe_init_hw_generic,
.reset_hw = &ixgbe_reset_hw_82599,
.start_hw = &ixgbe_start_hw_82599,
@@ -2220,6 +2218,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.clear_vfta = &ixgbe_clear_vfta_generic,
.set_vfta = &ixgbe_set_vfta_generic,
.fc_enable = &ixgbe_fc_enable_generic,
+ .setup_fc = ixgbe_setup_fc_generic,
.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic,
.init_uta_tables = &ixgbe_init_uta_tables_generic,
.setup_sfp = &ixgbe_setup_sfp_modules_82599,
@@ -2227,6 +2226,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
.acquire_swfw_sync = &ixgbe_acquire_swfw_sync,
.release_swfw_sync = &ixgbe_release_swfw_sync,
+ .init_swfw_sync = NULL,
.get_thermal_sensor_data = &ixgbe_get_thermal_sensor_data_generic,
.init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic,
.prot_autoc_read = &prot_autoc_read_82599,
@@ -2235,7 +2235,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.disable_rx = &ixgbe_disable_rx_generic,
};
-static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
+static const struct ixgbe_eeprom_operations eeprom_ops_82599 = {
.init_params = &ixgbe_init_eeprom_params_generic,
.read = &ixgbe_read_eeprom_82599,
.read_buffer = &ixgbe_read_eeprom_buffer_82599,
@@ -2246,7 +2246,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
.update_checksum = &ixgbe_update_eeprom_checksum_generic,
};
-static struct ixgbe_phy_operations phy_ops_82599 = {
+static const struct ixgbe_phy_operations phy_ops_82599 = {
.identify = &ixgbe_identify_phy_82599,
.identify_sfp = &ixgbe_identify_module_generic,
.init = &ixgbe_init_phy_ops_82599,
@@ -2263,7 +2263,7 @@ static struct ixgbe_phy_operations phy_ops_82599 = {
.check_overtemp = &ixgbe_tn_check_overtemp,
};
-struct ixgbe_info ixgbe_82599_info = {
+const struct ixgbe_info ixgbe_82599_info = {
.mac = ixgbe_mac_82599EB,
.get_invariants = &ixgbe_get_invariants_82599,
.mac_ops = &mac_ops_82599,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 64045053e874..c47b605e8651 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2015 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -97,6 +97,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X540T:
case IXGBE_DEV_ID_X540T1:
case IXGBE_DEV_ID_X550T:
+ case IXGBE_DEV_ID_X550T1:
case IXGBE_DEV_ID_X550EM_X_10G_T:
supported = true;
break;
@@ -111,12 +112,12 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
}
/**
- * ixgbe_setup_fc - Set up flow control
+ * ixgbe_setup_fc_generic - Set up flow control
* @hw: pointer to hardware structure
*
* Called at init time to set up flow control.
**/
-static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
+s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw)
{
s32 ret_val = 0;
u32 reg = 0, reg_bp = 0;
@@ -276,6 +277,7 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
{
s32 ret_val;
u32 ctrl_ext;
+ u16 device_caps;
/* Set the media type */
hw->phy.media_type = hw->mac.ops.get_media_type(hw);
@@ -296,10 +298,26 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
IXGBE_WRITE_FLUSH(hw);
/* Setup flow control */
- ret_val = ixgbe_setup_fc(hw);
+ ret_val = hw->mac.ops.setup_fc(hw);
if (ret_val)
return ret_val;
+ /* Cache the bit indicating need for the crosstalk fix */

+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+ hw->mac.ops.get_device_caps(hw, &device_caps);
+ if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR)
+ hw->need_crosstalk_fix = false;
+ else
+ hw->need_crosstalk_fix = true;
+ break;
+ default:
+ hw->need_crosstalk_fix = false;
+ break;
+ }
+
/* Clear adapter stopped flag */
hw->adapter_stopped = false;
@@ -681,6 +699,7 @@ s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
{
struct ixgbe_bus_info *bus = &hw->bus;
+ u16 ee_ctrl_4;
u32 reg;
reg = IXGBE_READ_REG(hw, IXGBE_STATUS);
@@ -691,6 +710,13 @@ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw)
reg = IXGBE_READ_REG(hw, IXGBE_FACTPS(hw));
if (reg & IXGBE_FACTPS_LFS)
bus->func ^= 0x1;
+
+ /* Get MAC instance from EEPROM for configuring CS4227 */
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) {
+ hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4);
+ bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >>
+ IXGBE_EE_CTRL_4_INST_ID_SHIFT;
+ }
}
/**
@@ -754,6 +780,9 @@ s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index)
{
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ if (index > 3)
+ return IXGBE_ERR_PARAM;
+
/* To turn on the LED, set mode to ON. */
led_reg &= ~IXGBE_LED_MODE_MASK(index);
led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index);
@@ -772,6 +801,9 @@ s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index)
{
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
+ if (index > 3)
+ return IXGBE_ERR_PARAM;
+
/* To turn off the LED, set mode to OFF. */
led_reg &= ~IXGBE_LED_MODE_MASK(index);
led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index);
@@ -816,8 +848,8 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
*/
eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
IXGBE_EEC_SIZE_SHIFT);
- eeprom->word_size = 1 << (eeprom_size +
- IXGBE_EEPROM_WORD_SIZE_SHIFT);
+ eeprom->word_size = BIT(eeprom_size +
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
}
if (eec & IXGBE_EEC_ADDR_SIZE)
@@ -1493,7 +1525,7 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
* Mask is used to shift "count" bits of "data" out to the EEPROM
* one bit at a time. Determine the starting bit based on count
*/
- mask = 0x01 << (count - 1);
+ mask = BIT(count - 1);
for (i = 0; i < count; i++) {
/*
@@ -1982,7 +2014,7 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
*/
vector_reg = (vector >> 5) & 0x7F;
vector_bit = vector & 0x1F;
- hw->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
+ hw->mac.mta_shadow[vector_reg] |= BIT(vector_bit);
}
/**
@@ -2648,7 +2680,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)
**/
s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw)
{
- int secrxreg;
+ u32 secrxreg;
secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
@@ -2689,6 +2721,9 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
bool locked = false;
s32 ret_val;
+ if (index > 3)
+ return IXGBE_ERR_PARAM;
+
/*
* Link must be up to auto-blink the LEDs;
* Force it if link is down.
@@ -2732,6 +2767,9 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
bool locked = false;
s32 ret_val;
+ if (index > 3)
+ return IXGBE_ERR_PARAM;
+
ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
if (ret_val)
return ret_val;
@@ -2854,6 +2892,7 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
break;
@@ -2911,16 +2950,18 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
mpsar_hi = 0;
}
} else if (vmdq < 32) {
- mpsar_lo &= ~(1 << vmdq);
+ mpsar_lo &= ~BIT(vmdq);
IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
} else {
- mpsar_hi &= ~(1 << (vmdq - 32));
+ mpsar_hi &= ~BIT(vmdq - 32);
IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
}
/* was that the last pool using this rar? */
- if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
+ if (mpsar_lo == 0 && mpsar_hi == 0 &&
+ rar != 0 && rar != hw->mac.san_mac_rar_index)
hw->mac.ops.clear_rar(hw, rar);
+
return 0;
}
@@ -2943,11 +2984,11 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
if (vmdq < 32) {
mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
- mpsar |= 1 << vmdq;
+ mpsar |= BIT(vmdq);
IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
} else {
mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
- mpsar |= 1 << (vmdq - 32);
+ mpsar |= BIT(vmdq - 32);
IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
}
return 0;
@@ -2968,11 +3009,11 @@ s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq)
u32 rar = hw->mac.san_mac_rar_index;
if (vmdq < 32) {
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq);
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), BIT(vmdq));
IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
} else {
IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
- IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32));
+ IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), BIT(vmdq - 32));
}
return 0;
@@ -3072,7 +3113,7 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
* bits[4-0]: which bit in the register
*/
regidx = vlan / 32;
- vfta_delta = 1 << (vlan % 32);
+ vfta_delta = BIT(vlan % 32);
vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx));
/* vfta_delta represents the difference between the current value
@@ -3103,12 +3144,12 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32));
/* set the pool bit */
- bits |= 1 << (vind % 32);
+ bits |= BIT(vind % 32);
if (vlan_on)
goto vlvf_update;
/* clear the pool bit */
- bits ^= 1 << (vind % 32);
+ bits ^= BIT(vind % 32);
if (!bits &&
!IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) {
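
The generic VFTA update above splits a 12-bit VLAN id into a register index and a bit position — 4096 ids across 128 32-bit registers. The indexing in isolation, with a hypothetical vid:

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1UL << (n))

    int main(void)
    {
            uint32_t vfta[128] = {0};
            uint16_t vid = 1234;                 /* hypothetical VLAN id */

            uint16_t regidx = vid / 32;          /* bits[11:5]: register */
            uint32_t vfta_delta = BIT(vid % 32); /* bits[4:0]: bit       */

            vfta[regidx] |= vfta_delta;          /* vlan_on              */
            vfta[regidx] ^= vfta_delta;          /* toggling off again   */

            printf("vid %u -> reg %u bit %u\n", vid, regidx, vid % 32);
            return 0;
    }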
@@ -3178,6 +3219,31 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_need_crosstalk_fix - Determine if we need to do the crosstalk fix
+ * @hw: pointer to hardware structure
+ *
+ * Contains the logic to identify if we need to verify link for the
+ * crosstalk fix
+ **/
+static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw)
+{
+ /* Does FW say we need the fix */
+ if (!hw->need_crosstalk_fix)
+ return false;
+
+ /* Only consider SFP+ PHYs i.e. media type fiber */
+ switch (hw->mac.ops.get_media_type(hw)) {
+ case ixgbe_media_type_fiber:
+ case ixgbe_media_type_fiber_qsfp:
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+/**
* ixgbe_check_mac_link_generic - Determine link and speed status
* @hw: pointer to hardware structure
* @speed: pointer to link speed
@@ -3192,6 +3258,35 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
u32 links_reg, links_orig;
u32 i;
+ /* If the crosstalk fix is enabled, do the sanity check of
+ * making sure the SFP+ cage is full.
+ */
+ if (ixgbe_need_crosstalk_fix(hw)) {
+ u32 sfp_cage_full;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82599EB:
+ sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
+ IXGBE_ESDP_SDP2;
+ break;
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+ sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) &
+ IXGBE_ESDP_SDP0;
+ break;
+ default:
+ /* sanity check - No SFP+ devices here */
+ sfp_cage_full = false;
+ break;
+ }
+
+ if (!sfp_cage_full) {
+ *link_up = false;
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ return 0;
+ }
+ }
+
/* clear the old state */
links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS);
@@ -3300,43 +3395,25 @@ wwn_prefix_err:
/**
* ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
* @hw: pointer to hardware structure
- * @enable: enable or disable switch for anti-spoofing
- * @pf: Physical Function pool - do not enable anti-spoofing for the PF
+ * @enable: enable or disable switch for MAC anti-spoofing
+ * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing
*
**/
-void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
+void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
{
- int j;
- int pf_target_reg = pf >> 3;
- int pf_target_shift = pf % 8;
- u32 pfvfspoof = 0;
+ int vf_target_reg = vf >> 3;
+ int vf_target_shift = vf % 8;
+ u32 pfvfspoof;
if (hw->mac.type == ixgbe_mac_82598EB)
return;
+ pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
if (enable)
- pfvfspoof = IXGBE_SPOOF_MACAS_MASK;
-
- /*
- * PFVFSPOOF register array is size 8 with 8 bits assigned to
- * MAC anti-spoof enables in each register array element.
- */
- for (j = 0; j < pf_target_reg; j++)
- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
-
- /*
- * The PF should be allowed to spoof so that it can support
- * emulation mode NICs. Do not set the bits assigned to the PF
- */
- pfvfspoof &= (1 << pf_target_shift) - 1;
- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
-
- /*
- * Remaining pools belong to the PF so they do not need to have
- * anti-spoofing enabled.
- */
- for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0);
+ pfvfspoof |= BIT(vf_target_shift);
+ else
+ pfvfspoof &= ~BIT(vf_target_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
}
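
The rewrite above turns MAC anti-spoofing into a read-modify-write of one VF's bit — eight enables per PFVFSPOOF register — instead of blanket-writing the whole register file. The same indexing against an in-memory model:

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1U << (n))

    static uint32_t pfvfspoof[8]; /* model of the 8 PFVFSPOOF registers */

    /* Mirrors the rewritten ixgbe_set_mac_anti_spoofing(): touch only
     * the target VF's bit, leaving the other pools alone. */
    static void set_mac_anti_spoofing(int enable, int vf)
    {
            int reg = vf >> 3;     /* 8 enables per register */
            int shift = vf % 8;

            if (enable)
                    pfvfspoof[reg] |= BIT(shift);
            else
                    pfvfspoof[reg] &= ~BIT(shift);
    }

    int main(void)
    {
            set_mac_anti_spoofing(1, 13);
            set_mac_anti_spoofing(1, 14);
            set_mac_anti_spoofing(0, 13);
            printf("reg1=%02x\n", pfvfspoof[1]); /* bit 6 only */
            return 0;
    }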
/**
@@ -3357,9 +3434,9 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
if (enable)
- pfvfspoof |= (1 << vf_target_shift);
+ pfvfspoof |= BIT(vf_target_shift);
else
- pfvfspoof &= ~(1 << vf_target_shift);
+ pfvfspoof &= ~BIT(vf_target_shift);
IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
}
@@ -3483,18 +3560,27 @@ static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length)
* Communicates with the manageability block. On success return 0
* else return IXGBE_ERR_HOST_INTERFACE_COMMAND.
**/
-s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
+s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *buffer,
u32 length, u32 timeout,
bool return_data)
{
- u32 hicr, i, bi, fwsts;
u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
+ u32 hicr, i, bi, fwsts;
u16 buf_len, dword_len;
+ union {
+ struct ixgbe_hic_hdr hdr;
+ u32 u32arr[1];
+ } *bp = buffer;
+ s32 status;
- if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
+ if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
hw_dbg(hw, "Buffer length failure buffersize-%d.\n", length);
return IXGBE_ERR_HOST_INTERFACE_COMMAND;
}
+ /* Take management host interface semaphore */
+ status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
+ if (status)
+ return status;
/* Set bit 9 of FWSTS clearing FW reset indication */
fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS);
@@ -3502,26 +3588,27 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
/* Check that the host interface is enabled. */
hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
- if ((hicr & IXGBE_HICR_EN) == 0) {
+ if (!(hicr & IXGBE_HICR_EN)) {
hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n");
- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto rel_out;
}
/* Calculate length in DWORDs. We must be DWORD aligned */
- if ((length % (sizeof(u32))) != 0) {
+ if (length % sizeof(u32)) {
hw_dbg(hw, "Buffer length failure, not aligned to dword");
- return IXGBE_ERR_INVALID_ARGUMENT;
+ status = IXGBE_ERR_INVALID_ARGUMENT;
+ goto rel_out;
}
dword_len = length >> 2;
- /*
- * The device driver writes the relevant command block
+ /* The device driver writes the relevant command block
* into the ram area.
*/
for (i = 0; i < dword_len; i++)
IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG,
- i, cpu_to_le32(buffer[i]));
+ i, cpu_to_le32(bp->u32arr[i]));
/* Setting this bit tells the ARC that a new command is pending. */
IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C);
@@ -3534,44 +3621,49 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
}
/* Check command successful completion. */
- if ((timeout != 0 && i == timeout) ||
- (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) {
+ if ((timeout && i == timeout) ||
+ !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) {
hw_dbg(hw, "Command has failed with no status valid.\n");
- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto rel_out;
}
if (!return_data)
- return 0;
+ goto rel_out;
/* Calculate length in DWORDs */
dword_len = hdr_size >> 2;
/* first pull in the header so we know the buffer length */
for (bi = 0; bi < dword_len; bi++) {
- buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
- le32_to_cpus(&buffer[bi]);
+ bp->u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
+ le32_to_cpus(&bp->u32arr[bi]);
}
/* If there is anything in the data position, pull it in */
- buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
- if (buf_len == 0)
- return 0;
+ buf_len = bp->hdr.buf_len;
+ if (!buf_len)
+ goto rel_out;
- if (length < (buf_len + hdr_size)) {
+ if (length < round_up(buf_len, 4) + hdr_size) {
hw_dbg(hw, "Buffer not large enough for reply message.\n");
- return IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ status = IXGBE_ERR_HOST_INTERFACE_COMMAND;
+ goto rel_out;
}
/* Calculate length in DWORDs, add 3 for odd lengths */
dword_len = (buf_len + 3) >> 2;
- /* Pull in the rest of the buffer (bi is where we left off)*/
+ /* Pull in the rest of the buffer (bi is where we left off) */
for (; bi <= dword_len; bi++) {
- buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
- le32_to_cpus(&buffer[bi]);
+ bp->u32arr[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi);
+ le32_to_cpus(&bp->u32arr[bi]);
}
- return 0;
+rel_out:
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
+
+ return status;
}
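
Both directions of the host interface move data in DWORDs, and the reply check now rounds buf_len up before comparing against the caller's buffer. The arithmetic in isolation (hdr_size is a stand-in for sizeof(struct ixgbe_hic_hdr)):

    /* Sketch of the DWORD-length arithmetic in the host interface path:
     * requests must be 4-byte aligned, replies are rounded up to the
     * next DWORD before the size check. */
    #include <stdio.h>

    int main(void)
    {
            unsigned int length = 24;   /* request bytes, must be %4 == 0 */
            unsigned int hdr_size = 4;  /* stand-in for the HIC header    */
            unsigned int buf_len = 9;   /* odd-sized reply payload        */

            if (length % 4)
                    return 1;           /* IXGBE_ERR_INVALID_ARGUMENT     */

            unsigned int req_dwords = length >> 2;        /* 6            */
            unsigned int rep_dwords = (buf_len + 3) >> 2; /* 3: +3 rounds */
            unsigned int need = ((buf_len + 3) & ~3u) + hdr_size;

            printf("req=%u dw, rep=%u dw, need %u <= %u ? %s\n",
                   req_dwords, rep_dwords, need, length,
                   need <= length ? "ok" : "too small");
            return 0;
    }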
/**
@@ -3594,13 +3686,10 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
int i;
s32 ret_val;
- if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM))
- return IXGBE_ERR_SWFW_SYNC;
-
fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
- fw_cmd.port_num = (u8)hw->bus.func;
+ fw_cmd.port_num = hw->bus.func;
fw_cmd.ver_maj = maj;
fw_cmd.ver_min = min;
fw_cmd.ver_build = build;
@@ -3612,7 +3701,7 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
fw_cmd.pad2 = 0;
for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) {
- ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
+ ret_val = ixgbe_host_interface_command(hw, &fw_cmd,
sizeof(fw_cmd),
IXGBE_HI_COMMAND_TIMEOUT,
true);
@@ -3628,7 +3717,6 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
break;
}
- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
return ret_val;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 2b9563137fd8..6d4c260d0cbd 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2014 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -81,6 +81,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw);
+s32 ixgbe_setup_fc_generic(struct ixgbe_hw *);
bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw);
void ixgbe_fc_autoneg(struct ixgbe_hw *hw);
@@ -105,13 +106,13 @@ s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked);
s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
-void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf);
+void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps);
s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
u8 build, u8 ver);
-s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
- u32 length, u32 timeout, bool return_data);
+s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, void *, u32 length,
+ u32 timeout, bool return_data);
void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
bool ixgbe_mng_present(struct ixgbe_hw *hw);
bool ixgbe_mng_enabled(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index 02c7333a9c83..072ef3b5fc61 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2014 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -186,7 +186,7 @@ void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en)
for (*pfc_en = 0, tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) {
if (tc_config[tc].dcb_pfc != pfc_disabled)
- *pfc_en |= 1 << tc;
+ *pfc_en |= BIT(tc);
}
}
@@ -232,7 +232,7 @@ void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction,
u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up)
{
struct tc_configuration *tc_config = &cfg->tc_config[0];
- u8 prio_mask = 1 << up;
+ u8 prio_mask = BIT(up);
u8 tc = cfg->num_tcs.pg_tcs;
/* If tc is 0 then DCB is likely not enabled or supported */
@@ -293,6 +293,7 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
return ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max,
bwgid, ptype, prio_tc);
default:
@@ -311,6 +312,7 @@ s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
return ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc);
default:
break;
@@ -368,6 +370,7 @@ s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max,
bwg_id, prio_type, prio_tc);
ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max,
@@ -398,6 +401,7 @@ void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
ixgbe_dcb_read_rtrup2tc_82599(hw, map);
break;
default:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index d3ba63f9ad37..b79e93a5b699 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -210,7 +210,7 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
/* Configure PFC Tx thresholds per TC */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
- if (!(pfc_en & (1 << i))) {
+ if (!(pfc_en & BIT(i))) {
IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0);
IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0);
continue;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index b5cc989a3d23..1011d644978f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -248,7 +248,7 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
int enabled = 0;
for (j = 0; j < MAX_USER_PRIORITY; j++) {
- if ((prio_tc[j] == i) && (pfc_en & (1 << j))) {
+ if ((prio_tc[j] == i) && (pfc_en & BIT(j))) {
enabled = 1;
break;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 2707bda37418..b8fc3cfec831 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -62,7 +62,7 @@ static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max)
};
u8 up = dcb_getapp(adapter->netdev, &app);
- if (up && !(up & (1 << adapter->fcoe.up)))
+ if (up && !(up & BIT(adapter->fcoe.up)))
changes |= BIT_APP_UPCHG;
#endif
@@ -657,7 +657,7 @@ static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
app->protocol == ETH_P_FCOE) {
u8 app_mask = dcb_ieee_getapp_mask(dev, app);
- if (app_mask & (1 << adapter->fcoe.up))
+ if (app_mask & BIT(adapter->fcoe.up))
return 0;
adapter->fcoe.up = app->priority;
@@ -700,7 +700,7 @@ static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev,
app->protocol == ETH_P_FCOE) {
u8 app_mask = dcb_ieee_getapp_mask(dev, app);
- if (app_mask & (1 << adapter->fcoe.up))
+ if (app_mask & BIT(adapter->fcoe.up))
return 0;
adapter->fcoe.up = app_mask ?
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index b3530e1e3ce1..0d7209eb5abf 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2014 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -533,10 +533,8 @@ static void ixgbe_get_regs(struct net_device *netdev,
/* Flow Control */
regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
- regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0));
- regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1));
- regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
- regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
+ for (i = 0; i < 4; i++)
+ regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_FCTTV(i));
for (i = 0; i < 8; i++) {
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
@@ -547,6 +545,7 @@ static void ixgbe_get_regs(struct net_device *netdev,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
break;
@@ -660,6 +659,7 @@ static void ixgbe_get_regs(struct net_device *netdev,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
for (i = 0; i < 8; i++)
@@ -718,8 +718,10 @@ static void ixgbe_get_regs(struct net_device *netdev,
regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
- regs_buff[942] = IXGBE_GET_STAT(adapter, gorc);
- regs_buff[944] = IXGBE_GET_STAT(adapter, gotc);
+ regs_buff[942] = (u32)IXGBE_GET_STAT(adapter, gorc);
+ regs_buff[943] = (u32)(IXGBE_GET_STAT(adapter, gorc) >> 32);
+ regs_buff[944] = (u32)IXGBE_GET_STAT(adapter, gotc);
+ regs_buff[945] = (u32)(IXGBE_GET_STAT(adapter, gotc) >> 32);
for (i = 0; i < 8; i++)
regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
@@ -729,7 +731,8 @@ static void ixgbe_get_regs(struct net_device *netdev,
regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
- regs_buff[961] = IXGBE_GET_STAT(adapter, tor);
+ regs_buff[961] = (u32)IXGBE_GET_STAT(adapter, tor);
+ regs_buff[962] = (u32)(IXGBE_GET_STAT(adapter, tor) >> 32);
regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
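
gorc, gotc and tor are 64-bit octet counters, which is why each now occupies two u32 slots in the regs dump. The split in isolation:

    /* Sketch: splitting a 64-bit stat across the u32-wide ethtool
     * regs buffer, as done for gorc/gotc/tor above. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t gorc = 0x00000012345678abULL; /* hypothetical counter */
            uint32_t regs_buff[2];

            regs_buff[0] = (uint32_t)gorc;         /* low 32 bits  */
            regs_buff[1] = (uint32_t)(gorc >> 32); /* high 32 bits */

            printf("%08x %08x\n", regs_buff[0], regs_buff[1]);
            return 0;
    }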
@@ -801,15 +804,11 @@ static void ixgbe_get_regs(struct net_device *netdev,
regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
- regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0);
- regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1);
- regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2);
- regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3);
+ for (i = 0; i < 4; i++)
+ regs_buff[1102 + i] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA(i));
regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
- regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0);
- regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1);
- regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2);
- regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3);
+ for (i = 0; i < 4; i++)
+ regs_buff[1107 + i] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA(i));
for (i = 0; i < 8; i++)
regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
@@ -1443,6 +1442,7 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
toggle = 0x7FFFF30F;
test = reg_test_82599;
break;
@@ -1583,7 +1583,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
/* Test each interrupt */
for (; i < 10; i++) {
/* Interrupt to test */
- mask = 1 << i;
+ mask = BIT(i);
if (!shared_int) {
/*
@@ -1681,6 +1681,7 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
reg_ctl &= ~IXGBE_DMATXCTL_TE;
IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
@@ -1720,6 +1721,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
reg_data |= IXGBE_DMATXCTL_TE;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
@@ -1780,6 +1782,7 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
reg_data |= IXGBE_MACC_FLU;
IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
@@ -2201,11 +2204,11 @@ static int ixgbe_set_phys_id(struct net_device *netdev,
return 2;
case ETHTOOL_ID_ON:
- hw->mac.ops.led_on(hw, IXGBE_LED_ON);
+ hw->mac.ops.led_on(hw, hw->bus.func);
break;
case ETHTOOL_ID_OFF:
- hw->mac.ops.led_off(hw, IXGBE_LED_ON);
+ hw->mac.ops.led_off(hw, hw->bus.func);
break;
case ETHTOOL_ID_INACTIVE:
@@ -2988,9 +2991,15 @@ static int ixgbe_get_ts_info(struct net_device *dev,
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
+ /* we always support timestamping disabled */
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);
+
switch (adapter->hw.mac.type) {
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+ info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
+ /* fallthrough */
case ixgbe_mac_X540:
case ixgbe_mac_82599EB:
info->so_timestamping =
@@ -3007,14 +3016,13 @@ static int ixgbe_get_ts_info(struct net_device *dev,
info->phc_index = -1;
info->tx_types =
- (1 << HWTSTAMP_TX_OFF) |
- (1 << HWTSTAMP_TX_ON);
-
- info->rx_filters =
- (1 << HWTSTAMP_FILTER_NONE) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
- (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
- (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+ BIT(HWTSTAMP_TX_OFF) |
+ BIT(HWTSTAMP_TX_ON);
+
+ info->rx_filters |=
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
break;
default:
return ethtool_op_get_ts_info(dev, info);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index e771e764daa3..bcdc88444ceb 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -128,6 +128,7 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
if (num_tcs > 4) {
/*
* TCs : TC0/1 TC2/3 TC4-7
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 7df3fe29b210..b4f03748adc0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2015 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -50,18 +50,10 @@
#include <linux/if_bridge.h>
#include <linux/prefetch.h>
#include <scsi/fc/fc_fcoe.h>
-#include <net/vxlan.h>
+#include <net/udp_tunnel.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
-
-#ifdef CONFIG_OF
-#include <linux/of_net.h>
-#endif
-
-#ifdef CONFIG_SPARC
-#include <asm/idprom.h>
-#include <asm/prom.h>
-#endif
+#include <net/tc_act/tc_mirred.h>
#include "ixgbe.h"
#include "ixgbe_common.h"
@@ -79,10 +71,10 @@ char ixgbe_default_device_descr[] =
static char ixgbe_default_device_descr[] =
"Intel(R) 10 Gigabit Network Connection";
#endif
-#define DRV_VERSION "4.2.1-k"
+#define DRV_VERSION "4.4.0-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
- "Copyright (c) 1999-2015 Intel Corporation.";
+ "Copyright (c) 1999-2016 Intel Corporation.";
static const char ixgbe_overheat_msg[] = "Network adapter has been stopped because it has over heated. Restart the computer. If the problem persists, power off the system and replace the adapter";
@@ -92,6 +84,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = {
[board_X540] = &ixgbe_X540_info,
[board_X550] = &ixgbe_X550_info,
[board_X550EM_x] = &ixgbe_X550EM_x_info,
+ [board_x550em_a] = &ixgbe_x550em_a_info,
};
/* ixgbe_pci_tbl - PCI Device ID Table
@@ -134,10 +127,17 @@ static const struct pci_device_id ixgbe_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), board_X550},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), board_X550},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), board_X550EM_x},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), board_X550EM_x},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), board_X550EM_x},
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), board_X550EM_x},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), board_x550em_a },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), board_x550em_a },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), board_x550em_a },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), board_x550em_a },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), board_x550em_a },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), board_x550em_a },
/* required last entry */
{0, }
};
@@ -372,6 +372,27 @@ u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
if (ixgbe_removed(reg_addr))
return IXGBE_FAILED_READ_REG;
+ if (unlikely(hw->phy.nw_mng_if_sel &
+ IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M)) {
+ struct ixgbe_adapter *adapter;
+ int i;
+
+ for (i = 0; i < 200; ++i) {
+ value = readl(reg_addr + IXGBE_MAC_SGMII_BUSY);
+ if (likely(!value))
+ goto writes_completed;
+ if (value == IXGBE_FAILED_READ_REG) {
+ ixgbe_remove_adapter(hw);
+ return IXGBE_FAILED_READ_REG;
+ }
+ udelay(5);
+ }
+
+ adapter = hw->back;
+ e_warn(hw, "register writes incomplete %08x\n", value);
+ }
+
+writes_completed:
value = readl(reg_addr + reg);
if (unlikely(value == IXGBE_FAILED_READ_REG))
ixgbe_check_remove(hw, reg);
@@ -588,7 +609,7 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
pr_info("%-15s %016lX %016lX %016lX\n",
netdev->name,
netdev->state,
- netdev->trans_start,
+ dev_trans_start(netdev),
netdev->last_rx);
}
@@ -869,6 +890,7 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
if (direction == -1) {
/* other causes */
msix_vector |= IXGBE_IVAR_ALLOC_VAL;
@@ -907,6 +929,7 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
mask = (qmask & 0xFFFFFFFF);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
mask = (qmask >> 32);
@@ -1087,9 +1110,40 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_tx_maxrate - callback to set the maximum per-queue bitrate
+ **/
+static int ixgbe_tx_maxrate(struct net_device *netdev,
+ int queue_index, u32 maxrate)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 bcnrc_val = ixgbe_link_mbps(adapter);
+
+ if (!maxrate)
+ return 0;
+
+ /* Calculate the rate factor values to set */
+ bcnrc_val <<= IXGBE_RTTBCNRC_RF_INT_SHIFT;
+ bcnrc_val /= maxrate;
+
+ /* clear everything but the rate factor */
+ bcnrc_val &= IXGBE_RTTBCNRC_RF_INT_MASK |
+ IXGBE_RTTBCNRC_RF_DEC_MASK;
+
+ /* enable the rate scheduler */
+ bcnrc_val |= IXGBE_RTTBCNRC_RS_ENA;
+
+ IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, queue_index);
+ IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, bcnrc_val);
+
+ return 0;
+}
+
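
A sketch of the fixed-point math in ixgbe_tx_maxrate() above: the RTTBCNRC rate factor is the link speed divided by the requested cap. The shift/mask values here model the IXGBE_RTTBCNRC_* definitions as a 10.14 fixed-point field and are assumptions, not quoted from this patch:

    #include <stdio.h>

    /* Assumed layout: 10.14 fixed-point rate factor plus an enable bit. */
    #define RF_INT_SHIFT 14
    #define RF_DEC_MASK  0x00003fffu
    #define RF_INT_MASK  (RF_DEC_MASK << RF_INT_SHIFT)
    #define RS_ENA       0x80000000u

    int main(void)
    {
            unsigned int link_mbps = 10000; /* 10G link                  */
            unsigned int maxrate = 2500;    /* requested queue cap, Mb/s */

            unsigned int bcnrc = (link_mbps << RF_INT_SHIFT) / maxrate;

            bcnrc &= RF_INT_MASK | RF_DEC_MASK; /* keep only the factor   */
            bcnrc |= RS_ENA;                    /* arm the rate scheduler */

            /* factor 4.0 => the queue gets 1/4 of the link */
            printf("bcnrc=%08x factor=%u.%04u\n", bcnrc,
                   (bcnrc & RF_INT_MASK) >> RF_INT_SHIFT,
                   (bcnrc & RF_DEC_MASK) * 10000 / (1 << RF_INT_SHIFT));
            return 0;
    }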
+/**
* ixgbe_clean_tx_irq - Reclaim resources after transmit completes
* @q_vector: structure containing interrupt and ring information
* @tx_ring: tx ring to clean
+ * @napi_budget: Used to determine if we are in netpoll
**/
static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring *tx_ring, int napi_budget)
@@ -2192,7 +2246,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
/* Populate MSIX to EITR Select */
if (adapter->num_vfs > 32) {
- u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
+ u32 eitrsel = BIT(adapter->num_vfs - 32) - 1;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
}
@@ -2222,6 +2276,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
ixgbe_set_ivar(adapter, -1, 1, v_idx);
break;
default:
@@ -2333,6 +2388,7 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
/*
* set the WDIS bit to not clear the timer bits and cause an
* immediate assertion of the interrupt
@@ -2494,6 +2550,7 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
return false;
case ixgbe_mac_82599EB:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
switch (hw->mac.ops.get_media_type(hw)) {
case ixgbe_media_type_fiber:
case ixgbe_media_type_fiber_qsfp:
@@ -2568,6 +2625,7 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
mask = (qmask & 0xFFFFFFFF);
if (mask)
IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
@@ -2596,6 +2654,7 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
mask = (qmask & 0xFFFFFFFF);
if (mask)
IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
@@ -2631,6 +2690,7 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
mask |= IXGBE_EIMS_TS;
break;
default:
@@ -2646,7 +2706,10 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
- if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP)
+ case ixgbe_mac_x550em_a:
+ if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP ||
+ adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP ||
+ adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP_N)
mask |= IXGBE_EIMS_GPI_SDP0(&adapter->hw);
if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t)
mask |= IXGBE_EICR_GPI_SDP0_X540;
@@ -2704,6 +2767,7 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
if (hw->phy.type == ixgbe_phy_x550em_ext_t &&
(eicr & IXGBE_EICR_GPI_SDP0_X540)) {
adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT;
@@ -2786,8 +2850,10 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
ixgbe_update_dca(q_vector);
#endif
- ixgbe_for_each_ring(ring, q_vector->tx)
- clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring, budget);
+ ixgbe_for_each_ring(ring, q_vector->tx) {
+ if (!ixgbe_clean_tx_irq(q_vector, ring, budget))
+ clean_complete = false;
+ }
/* Exit if we are called by netpoll or busy polling is active */
if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector))
@@ -2805,7 +2871,8 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
per_ring_budget);
work_done += cleaned;
- clean_complete &= (cleaned < per_ring_budget);
+ if (cleaned >= per_ring_budget)
+ clean_complete = false;
}
ixgbe_qv_unlock_napi(q_vector);
@@ -2818,9 +2885,9 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
if (adapter->rx_itr_setting & 1)
ixgbe_set_itr(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
- ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
+ ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));
- return 0;
+ return min(work_done, budget - 1);
}
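Returning min(work_done, budget - 1) instead of a hard-coded 0 brings ixgbe_poll() in line with the NAPI contract: a return value equal to budget means "not done, keep polling", so once the interrupt has been re-armed the driver must report strictly less than budget, while still passing the real work count up for adaptive-polling heuristics. A minimal sketch of the contract (clean_rx() and enable_irqs() are hypothetical stand-ins, not driver functions):

	static int my_poll(struct napi_struct *napi, int budget)
	{
		int work_done = clean_rx(napi, budget);		/* hypothetical */

		/* not done: report the full budget so the core polls again */
		if (work_done == budget)
			return budget;

		/* done: leave polling mode, then re-arm the interrupt */
		napi_complete(napi);
		enable_irqs();					/* hypothetical */

		return min(work_done, budget - 1);
	}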
/**
@@ -2937,6 +3004,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
if (eicr & IXGBE_EICR_ECC) {
e_info(link, "Received ECC Err, initiating reset\n");
adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
@@ -3016,7 +3084,7 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
free_irq(entry->vector, q_vector);
}
- free_irq(adapter->msix_entries[vector++].vector, adapter);
+ free_irq(adapter->msix_entries[vector].vector, adapter);
}
/**
@@ -3033,6 +3101,7 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
@@ -3109,15 +3178,15 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
* currently 40.
*/
if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR))
- txdctl |= (1 << 16); /* WTHRESH = 1 */
+ txdctl |= 1u << 16; /* WTHRESH = 1 */
else
- txdctl |= (8 << 16); /* WTHRESH = 8 */
+ txdctl |= 8u << 16; /* WTHRESH = 8 */
/*
* Setting PTHRESH to 32 both improves performance
* and avoids a TX hang with DFP enabled
*/
- txdctl |= (1 << 8) | /* HTHRESH = 1 */
+ txdctl |= (1u << 8) | /* HTHRESH = 1 */
32; /* PTHRESH = 32 */
/* reinitialize flowdirector state */
@@ -3669,9 +3738,9 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
return;
if (rss_i > 3)
- psrtype |= 2 << 29;
+ psrtype |= 2u << 29;
else if (rss_i > 1)
- psrtype |= 1 << 29;
+ psrtype |= 1u << 29;
for_each_set_bit(pool, &adapter->fwd_bitmask, 32)
IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
@@ -3698,9 +3767,9 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
/* Enable only the PF's pool for Tx/Rx */
- IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), GENMASK(31, vf_shift));
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1);
- IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), GENMASK(31, vf_shift));
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1);
if (adapter->bridge_mode == BRIDGE_MODE_VEB)
IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
@@ -3729,34 +3798,10 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext);
-
- /* Enable MAC Anti-Spoofing */
- hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
- adapter->num_vfs);
-
- /* Ensure LLDP and FC is set for Ethertype Antispoofing if we will be
- * calling set_ethertype_anti_spoofing for each VF in loop below
- */
- if (hw->mac.ops.set_ethertype_anti_spoofing) {
- IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
- (IXGBE_ETQF_FILTER_EN |
- IXGBE_ETQF_TX_ANTISPOOF |
- IXGBE_ETH_P_LLDP));
-
- IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC),
- (IXGBE_ETQF_FILTER_EN |
- IXGBE_ETQF_TX_ANTISPOOF |
- ETH_P_PAUSE));
- }
-
- /* For VFs that have spoof checking turned off */
for (i = 0; i < adapter->num_vfs; i++) {
- if (!adapter->vfinfo[i].spoofchk_enabled)
- ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, false);
-
- /* enable ethertype anti spoofing if hw supports it */
- if (hw->mac.ops.set_ethertype_anti_spoofing)
- hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i);
+ /* configure spoof checking */
+ ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i,
+ adapter->vfinfo[i].spoofchk_enabled);
/* Enable/Disable RSS query feature */
ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i,
@@ -3832,6 +3877,7 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
break;
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
if (adapter->num_vfs)
rdrxctl |= IXGBE_RDRXCTL_PSP;
/* fall through for older HW */
@@ -3908,7 +3954,9 @@ static int ixgbe_vlan_rx_add_vid(struct net_device *netdev,
struct ixgbe_hw *hw = &adapter->hw;
/* add VID to filter table */
- hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, true);
+ if (!vid || !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
+ hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true, !!vid);
+
set_bit(vid, adapter->active_vlans);
return 0;
@@ -3947,7 +3995,7 @@ void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid)
* entry other than the PF.
*/
word = idx * 2 + (VMDQ_P(0) / 32);
- bits = ~(1 << (VMDQ_P(0)) % 32);
+ bits = ~BIT(VMDQ_P(0) % 32);
bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
/* Disable the filter so this falls into the default pool. */
@@ -3965,9 +4013,7 @@ static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
struct ixgbe_hw *hw = &adapter->hw;
/* remove VID from filter table */
- if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)
- ixgbe_update_pf_promisc_vlvf(adapter, vid);
- else
+ if (vid && !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), false, true);
clear_bit(vid, adapter->active_vlans);
@@ -3995,6 +4041,7 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
@@ -4031,6 +4078,7 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
for (i = 0; i < adapter->num_rx_queues; i++) {
struct ixgbe_ring *ring = adapter->rx_ring[i];
@@ -4052,19 +4100,21 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
u32 vlnctrl, i;
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
default:
if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
break;
/* fall through */
case ixgbe_mac_82598EB:
/* legacy case, we can just disable VLAN filtering */
- vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
+ vlnctrl &= ~IXGBE_VLNCTRL_VFE;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
return;
}
@@ -4076,12 +4126,16 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
/* Set flag so we don't redo unnecessary work */
adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC;
+ /* For VMDq and SR-IOV we must leave VLAN filtering enabled */
+ vlnctrl |= IXGBE_VLNCTRL_VFE;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+
/* Add PF to all active pools */
for (i = IXGBE_VLVF_ENTRIES; --i;) {
u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
u32 vlvfb = IXGBE_READ_REG(hw, reg_offset);
- vlvfb |= 1 << (VMDQ_P(0) % 32);
+ vlvfb |= BIT(VMDQ_P(0) % 32);
IXGBE_WRITE_REG(hw, reg_offset, vlvfb);
}
@@ -4111,7 +4165,7 @@ static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset)
if (vlvf) {
/* record VLAN ID in VFTA */
- vfta[(vid - vid_start) / 32] |= 1 << (vid % 32);
+ vfta[(vid - vid_start) / 32] |= BIT(vid % 32);
/* if PF is part of this then continue */
if (test_bit(vid, adapter->active_vlans))
@@ -4120,7 +4174,7 @@ static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset)
/* remove PF from the pool */
word = i * 2 + VMDQ_P(0) / 32;
- bits = ~(1 << (VMDQ_P(0) % 32));
+ bits = ~BIT(VMDQ_P(0) % 32);
bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits);
}
@@ -4142,20 +4196,22 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
u32 vlnctrl, i;
+ /* Set VLAN filtering to enabled */
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ vlnctrl |= IXGBE_VLNCTRL_VFE;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
default:
if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)
break;
/* fall through */
case ixgbe_mac_82598EB:
- vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
- vlnctrl |= IXGBE_VLNCTRL_VFE;
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
return;
}
@@ -4172,11 +4228,11 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
{
- u16 vid;
+ u16 vid = 1;
ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
- for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+ for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}
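The switch to for_each_set_bit_from() is needed because VID 0 was just added explicitly on the line above: iteration has to start at bit 1, and plain for_each_set_bit() would reset the cursor to 0 regardless of vid's initial value. In isolation (add_vid() is a hypothetical callback):

	DECLARE_BITMAP(active_vlans, VLAN_N_VID);
	u16 vid = 1;				/* VID 0 already handled */

	for_each_set_bit_from(vid, active_vlans, VLAN_N_VID)
		add_vid(vid);			/* runs once per set bit >= 1 */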
@@ -4426,6 +4482,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
+ netdev_features_t features = netdev->features;
int count;
/* Check for Promiscuous and All Multicast modes */
@@ -4443,14 +4500,13 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
hw->addr_ctrl.user_set_promisc = true;
fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
vmolr |= IXGBE_VMOLR_MPE;
- ixgbe_vlan_promisc_enable(adapter);
+ features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
} else {
if (netdev->flags & IFF_ALLMULTI) {
fctrl |= IXGBE_FCTRL_MPE;
vmolr |= IXGBE_VMOLR_MPE;
}
hw->addr_ctrl.user_set_promisc = false;
- ixgbe_vlan_promisc_disable(adapter);
}
/*
@@ -4483,7 +4539,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
}
/* This is useful for sniffing bad packets. */
- if (adapter->netdev->features & NETIF_F_RXALL) {
+ if (features & NETIF_F_RXALL) {
/* UPE and MPE will be handled by normal PROMISC logic
 * in ixgbe_set_rx_mode */
fctrl |= (IXGBE_FCTRL_SBP | /* Receive bad packets */
@@ -4496,10 +4552,15 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
- if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
ixgbe_vlan_strip_enable(adapter);
else
ixgbe_vlan_strip_disable(adapter);
+
+ if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ ixgbe_vlan_promisc_disable(adapter);
+ else
+ ixgbe_vlan_promisc_enable(adapter);
}
static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
@@ -4530,6 +4591,7 @@ static void ixgbe_clear_vxlan_port(struct ixgbe_adapter *adapter)
switch (adapter->hw.mac.type) {
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_VXLANCTRL, 0);
adapter->vxlan_port = 0;
break;
@@ -4630,6 +4692,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
dv_id = IXGBE_DV_X540(link, tc);
break;
default:
@@ -4690,6 +4753,7 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
dv_id = IXGBE_LOW_DV_X540(tc);
break;
default:
@@ -4805,9 +4869,9 @@ static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
return;
if (rss_i > 3)
- psrtype |= 2 << 29;
+ psrtype |= 2u << 29;
else if (rss_i > 1)
- psrtype |= 1 << 29;
+ psrtype |= 1u << 29;
IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype);
}
@@ -4871,7 +4935,7 @@ static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter,
/* shutdown specific queue receive and wait for dma to settle */
ixgbe_disable_rx_queue(adapter, rx_ring);
usleep_range(10000, 20000);
- ixgbe_irq_disable_queues(adapter, ((u64)1 << index));
+ ixgbe_irq_disable_queues(adapter, BIT_ULL(index));
ixgbe_clean_rx_ring(rx_ring);
rx_ring->l2_accel_priv = NULL;
}
@@ -5106,6 +5170,7 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
default:
IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
@@ -5156,6 +5221,7 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
gpie |= IXGBE_SDP1_GPIEN_8259X | IXGBE_SDP2_GPIEN_8259X;
break;
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
gpie |= IXGBE_SDP0_GPIEN_X540;
break;
default:
@@ -5228,7 +5294,7 @@ void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
{
WARN_ON(in_interrupt());
/* put off any impending NetWatchDogTimeout */
- adapter->netdev->trans_start = jiffies;
+ netif_trans_update(adapter->netdev);
while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
usleep_range(1000, 2000);
@@ -5467,6 +5533,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
~IXGBE_DMATXCTL_TE));
@@ -5498,6 +5565,58 @@ static void ixgbe_tx_timeout(struct net_device *netdev)
ixgbe_tx_timeout_reset(adapter);
}
+#ifdef CONFIG_IXGBE_DCB
+static void ixgbe_init_dcb(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct tc_configuration *tc;
+ int j;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ case ixgbe_mac_82599EB:
+ adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
+ break;
+ case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
+ adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
+ break;
+ case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+ default:
+ adapter->dcb_cfg.num_tcs.pg_tcs = DEF_TRAFFIC_CLASS;
+ adapter->dcb_cfg.num_tcs.pfc_tcs = DEF_TRAFFIC_CLASS;
+ break;
+ }
+
+ /* Configure DCB traffic classes */
+ for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
+ tc = &adapter->dcb_cfg.tc_config[j];
+ tc->path[DCB_TX_CONFIG].bwg_id = 0;
+ tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
+ tc->path[DCB_RX_CONFIG].bwg_id = 0;
+ tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
+ tc->dcb_pfc = pfc_disabled;
+ }
+
+ /* Initialize default user to priority mapping, UPx->TC0 */
+ tc = &adapter->dcb_cfg.tc_config[0];
+ tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
+ tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
+
+ adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
+ adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
+ adapter->dcb_cfg.pfc_mode_enable = false;
+ adapter->dcb_set_bitmap = 0x00;
+ if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
+ adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
+ memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
+ sizeof(adapter->temp_dcb_cfg));
+}
+#endif
+
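One detail worth noting in the defaults ixgbe_init_dcb() carries over: with the usual MAX_TRAFFIC_CLASS of 8, bwg_percent = 12 + (j & 1) gives the four even classes 12% and the four odd classes 13%, and 4 * 12 + 4 * 13 = 100, so the per-class percentages still sum to exactly 100.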
/**
* ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
* @adapter: board private structure to initialize
@@ -5512,10 +5631,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
unsigned int rss, fdir;
u32 fwsm;
-#ifdef CONFIG_IXGBE_DCB
- int j;
- struct tc_configuration *tc;
-#endif
+ int i;
/* PCI config space info */
@@ -5537,6 +5653,10 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
#ifdef CONFIG_IXGBE_DCA
adapter->flags |= IXGBE_FLAG_DCA_CAPABLE;
#endif
+#ifdef CONFIG_IXGBE_DCB
+ adapter->flags |= IXGBE_FLAG_DCB_CAPABLE;
+ adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
+#endif
#ifdef IXGBE_FCOE
adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
@@ -5547,7 +5667,14 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
#endif /* IXGBE_FCOE */
/* initialize static ixgbe jump table entries */
- adapter->jump_tables[0] = ixgbe_ipv4_fields;
+ adapter->jump_tables[0] = kzalloc(sizeof(*adapter->jump_tables[0]),
+ GFP_KERNEL);
+ if (!adapter->jump_tables[0])
+ return -ENOMEM;
+ adapter->jump_tables[0]->mat = ixgbe_ipv4_fields;
+
+ for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++)
+ adapter->jump_tables[i] = NULL;
adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) *
hw->mac.num_rar_entries,
@@ -5585,13 +5712,22 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
break;
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
+#ifdef CONFIG_IXGBE_DCB
+ adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE;
+#endif
+#ifdef IXGBE_FCOE
+ adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE;
+#ifdef CONFIG_IXGBE_DCB
+ adapter->fcoe.up = 0;
+#endif /* IXGBE_DCB */
+#endif /* IXGBE_FCOE */
+		/* fall through */
case ixgbe_mac_X550:
#ifdef CONFIG_IXGBE_DCA
adapter->flags &= ~IXGBE_FLAG_DCA_CAPABLE;
#endif
-#ifdef CONFIG_IXGBE_VXLAN
adapter->flags |= IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE;
-#endif
break;
default:
break;
@@ -5606,42 +5742,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
spin_lock_init(&adapter->fdir_perfect_lock);
#ifdef CONFIG_IXGBE_DCB
- switch (hw->mac.type) {
- case ixgbe_mac_X540:
- case ixgbe_mac_X550:
- case ixgbe_mac_X550EM_x:
- adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
- adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
- break;
- default:
- adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
- adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
- break;
- }
-
- /* Configure DCB traffic classes */
- for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
- tc = &adapter->dcb_cfg.tc_config[j];
- tc->path[DCB_TX_CONFIG].bwg_id = 0;
- tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
- tc->path[DCB_RX_CONFIG].bwg_id = 0;
- tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
- tc->dcb_pfc = pfc_disabled;
- }
-
- /* Initialize default user to priority mapping, UPx->TC0 */
- tc = &adapter->dcb_cfg.tc_config[0];
- tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
- tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
-
- adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
- adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
- adapter->dcb_cfg.pfc_mode_enable = false;
- adapter->dcb_set_bitmap = 0x00;
- adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
- memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg,
- sizeof(adapter->temp_dcb_cfg));
-
+ ixgbe_init_dcb(adapter);
#endif
/* default flow control settings */
@@ -6044,9 +6145,7 @@ int ixgbe_open(struct net_device *netdev)
ixgbe_up_complete(adapter);
ixgbe_clear_vxlan_port(adapter);
-#ifdef CONFIG_IXGBE_VXLAN
- vxlan_get_rx_port(netdev);
-#endif
+ udp_tunnel_get_rx_info(netdev);
return 0;
@@ -6217,6 +6316,7 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
pci_wake_from_d3(pdev, !!wufc);
break;
default:
@@ -6352,6 +6452,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
hwstats->pxonrxc[i] +=
IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
break;
@@ -6367,7 +6468,8 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
if ((hw->mac.type == ixgbe_mac_82599EB) ||
(hw->mac.type == ixgbe_mac_X540) ||
(hw->mac.type == ixgbe_mac_X550) ||
- (hw->mac.type == ixgbe_mac_X550EM_x)) {
+ (hw->mac.type == ixgbe_mac_X550EM_x) ||
+ (hw->mac.type == ixgbe_mac_x550em_a)) {
hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i));
IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */
hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(i));
@@ -6392,6 +6494,7 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
/* OS2BMC stats are X540 and later */
hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC);
hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC);
@@ -6562,7 +6665,7 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_q_vectors; i++) {
struct ixgbe_q_vector *qv = adapter->q_vector[i];
if (qv->rx.ring || qv->tx.ring)
- eics |= ((u64)1 << i);
+ eics |= BIT_ULL(i);
}
}
@@ -6662,6 +6765,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
case ixgbe_mac_82599EB: {
u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
@@ -7121,12 +7225,12 @@ static void ixgbe_service_task(struct work_struct *work)
ixgbe_service_event_complete(adapter);
return;
}
-#ifdef CONFIG_IXGBE_VXLAN
if (adapter->flags2 & IXGBE_FLAG2_VXLAN_REREG_NEEDED) {
+ rtnl_lock();
adapter->flags2 &= ~IXGBE_FLAG2_VXLAN_REREG_NEEDED;
- vxlan_get_rx_port(adapter->netdev);
+ udp_tunnel_get_rx_info(adapter->netdev);
+ rtnl_unlock();
}
-#endif /* CONFIG_IXGBE_VXLAN */
ixgbe_reset_subtask(adapter);
ixgbe_phy_interrupt_subtask(adapter);
ixgbe_sfp_detection_subtask(adapter);
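The rtnl_lock() here is required, not cosmetic: udp_tunnel_get_rx_info() replays every known tunnel port through the device's ndo_udp_tunnel_add callback and asserts RTNL on entry. The call added to ixgbe_open() above needs no explicit lock only because ndo_open already runs under RTNL.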
@@ -7148,9 +7252,18 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
struct ixgbe_tx_buffer *first,
u8 *hdr_len)
{
+ u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
struct sk_buff *skb = first->skb;
- u32 vlan_macip_lens, type_tucmd;
- u32 mss_l4len_idx, l4len;
+ union {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+ } ip;
+ union {
+ struct tcphdr *tcp;
+ unsigned char *hdr;
+ } l4;
+ u32 paylen, l4_offset;
int err;
if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -7163,46 +7276,52 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
if (err < 0)
return err;
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
+
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
- if (first->protocol == htons(ETH_P_IP)) {
- struct iphdr *iph = ip_hdr(skb);
- iph->tot_len = 0;
- iph->check = 0;
- tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, 0,
- IPPROTO_TCP,
- 0);
+ /* initialize outer IP header fields */
+ if (ip.v4->version == 4) {
+ /* IP header will have to cancel out any data that
+ * is not a part of the outer IP header
+ */
+ ip.v4->check = csum_fold(csum_add(lco_csum(skb),
+ csum_unfold(l4.tcp->check)));
type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+
+ ip.v4->tot_len = 0;
first->tx_flags |= IXGBE_TX_FLAGS_TSO |
IXGBE_TX_FLAGS_CSUM |
IXGBE_TX_FLAGS_IPV4;
- } else if (skb_is_gso_v6(skb)) {
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ } else {
+ ip.v6->payload_len = 0;
first->tx_flags |= IXGBE_TX_FLAGS_TSO |
IXGBE_TX_FLAGS_CSUM;
}
- /* compute header lengths */
- l4len = tcp_hdrlen(skb);
- *hdr_len = skb_transport_offset(skb) + l4len;
+ /* determine offset of inner transport header */
+ l4_offset = l4.hdr - skb->data;
+
+ /* compute length of segmentation header */
+ *hdr_len = (l4.tcp->doff * 4) + l4_offset;
+
+ /* remove payload length from inner checksum */
+ paylen = skb->len - l4_offset;
+ csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
/* update gso size and bytecount with header size */
first->gso_segs = skb_shinfo(skb)->gso_segs;
first->bytecount += (first->gso_segs - 1) * *hdr_len;
/* mss_l4len_id: use 0 as index for TSO */
- mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
+ mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
- vlan_macip_lens = skb_network_header_len(skb);
- vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens = l4.hdr - ip.hdr;
+ vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
@@ -7211,103 +7330,61 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
return 1;
}
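The reworked TSO path derives everything from skb_network_header() and skb_checksum_start() rather than ip_hdr()/tcp_hdr(), so the same arithmetic covers plain and encapsulated frames. A worked example for an untagged IPv4/TCP frame with no TCP options (illustrative values only):

	/* Ethernet(14) + IPv4(20) + TCP(20), so tcp->doff == 5 */
	l4_offset = l4.hdr - skb->data;			/* 14 + 20 = 34 */
	*hdr_len = (l4.tcp->doff * 4) + l4_offset;	/* 20 + 34 = 54 */
	paylen = skb->len - l4_offset;			/* TCP header + data */

The stack seeds l4.tcp->check with a pseudo-header checksum that includes this length, but the hardware re-adds the per-segment length while segmenting, so csum_replace_by_diff() folds paylen back out; L4LEN in the context descriptor is *hdr_len - l4_offset, i.e. the 20-byte TCP header.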
+static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb)
+{
+ unsigned int offset = 0;
+
+ ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
+
+ return offset == skb_checksum_start_offset(skb);
+}
+
static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
struct ixgbe_tx_buffer *first)
{
struct sk_buff *skb = first->skb;
u32 vlan_macip_lens = 0;
- u32 mss_l4len_idx = 0;
u32 type_tucmd = 0;
if (skb->ip_summed != CHECKSUM_PARTIAL) {
- if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
- !(first->tx_flags & IXGBE_TX_FLAGS_CC))
+csum_failed:
+ if (!(first->tx_flags & (IXGBE_TX_FLAGS_HW_VLAN |
+ IXGBE_TX_FLAGS_CC)))
return;
- vlan_macip_lens = skb_network_offset(skb) <<
- IXGBE_ADVTXD_MACLEN_SHIFT;
- } else {
- u8 l4_hdr = 0;
- union {
- struct iphdr *ipv4;
- struct ipv6hdr *ipv6;
- u8 *raw;
- } network_hdr;
- union {
- struct tcphdr *tcphdr;
- u8 *raw;
- } transport_hdr;
- __be16 frag_off;
-
- if (skb->encapsulation) {
- network_hdr.raw = skb_inner_network_header(skb);
- transport_hdr.raw = skb_inner_transport_header(skb);
- vlan_macip_lens = skb_inner_network_offset(skb) <<
- IXGBE_ADVTXD_MACLEN_SHIFT;
- } else {
- network_hdr.raw = skb_network_header(skb);
- transport_hdr.raw = skb_transport_header(skb);
- vlan_macip_lens = skb_network_offset(skb) <<
- IXGBE_ADVTXD_MACLEN_SHIFT;
- }
+ goto no_csum;
+ }
- /* use first 4 bits to determine IP version */
- switch (network_hdr.ipv4->version) {
- case IPVERSION:
- vlan_macip_lens |= transport_hdr.raw - network_hdr.raw;
- type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
- l4_hdr = network_hdr.ipv4->protocol;
- break;
- case 6:
- vlan_macip_lens |= transport_hdr.raw - network_hdr.raw;
- l4_hdr = network_hdr.ipv6->nexthdr;
- if (likely((transport_hdr.raw - network_hdr.raw) ==
- sizeof(struct ipv6hdr)))
- break;
- ipv6_skip_exthdr(skb, network_hdr.raw - skb->data +
- sizeof(struct ipv6hdr),
- &l4_hdr, &frag_off);
- if (unlikely(frag_off))
- l4_hdr = NEXTHDR_FRAGMENT;
- break;
- default:
+ switch (skb->csum_offset) {
+ case offsetof(struct tcphdr, check):
+ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ /* fall through */
+ case offsetof(struct udphdr, check):
+ break;
+ case offsetof(struct sctphdr, checksum):
+ /* validate that this is actually an SCTP request */
+ if (((first->protocol == htons(ETH_P_IP)) &&
+ (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
+ ((first->protocol == htons(ETH_P_IPV6)) &&
+ ixgbe_ipv6_csum_is_sctp(skb))) {
+ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
break;
}
-
- switch (l4_hdr) {
- case IPPROTO_TCP:
- type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
- mss_l4len_idx = (transport_hdr.tcphdr->doff * 4) <<
- IXGBE_ADVTXD_L4LEN_SHIFT;
- break;
- case IPPROTO_SCTP:
- type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
- mss_l4len_idx = sizeof(struct sctphdr) <<
- IXGBE_ADVTXD_L4LEN_SHIFT;
- break;
- case IPPROTO_UDP:
- mss_l4len_idx = sizeof(struct udphdr) <<
- IXGBE_ADVTXD_L4LEN_SHIFT;
- break;
- default:
- if (unlikely(net_ratelimit())) {
- dev_warn(tx_ring->dev,
- "partial checksum, version=%d, l4 proto=%x\n",
- network_hdr.ipv4->version, l4_hdr);
- }
- skb_checksum_help(skb);
- goto no_csum;
- }
-
- /* update TX checksum flag */
- first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
+ /* fall through */
+ default:
+ skb_checksum_help(skb);
+ goto csum_failed;
}
+ /* update TX checksum flag */
+ first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
+ vlan_macip_lens = skb_checksum_start_offset(skb) -
+ skb_network_offset(skb);
no_csum:
/* vlan_macip_lens: MACLEN, VLAN tag */
+ vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
- ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
- type_tucmd, mss_l4len_idx);
+ ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, 0);
}
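The rewritten ixgbe_tx_csum() never parses IP headers: with CHECKSUM_PARTIAL the stack has already recorded where the checksum lives, and among the transports this hardware can offload that offset alone identifies the protocol. Per the UAPI header layouts the switch is unambiguous, which a compile-time check could assert:

	BUILD_BUG_ON(offsetof(struct tcphdr, check) != 16);
	BUILD_BUG_ON(offsetof(struct udphdr, check) != 6);
	BUILD_BUG_ON(offsetof(struct sctphdr, checksum) != 8);

The SCTP case is still cross-checked against the IP protocol field, since an offset of 8 could in principle belong to some other header, and anything unrecognised falls back to skb_checksum_help().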
#define IXGBE_SET_FLAG(_input, _flag, _result) \
@@ -7581,7 +7658,6 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
/* snag network header to get L4 type and address */
skb = first->skb;
hdr.network = skb_network_header(skb);
-#ifdef CONFIG_IXGBE_VXLAN
if (skb->encapsulation &&
first->protocol == htons(ETH_P_IP) &&
hdr.ipv4->protocol != IPPROTO_UDP) {
@@ -7592,7 +7668,6 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
udp_hdr(skb)->dest == adapter->vxlan_port)
hdr.network = skb_inner_network_header(skb);
}
-#endif /* CONFIG_IXGBE_VXLAN */
/* Currently only IPv4/IPv6 with TCP is supported */
switch (hdr.ipv4->version) {
@@ -8192,14 +8267,53 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
static int ixgbe_delete_clsu32(struct ixgbe_adapter *adapter,
struct tc_cls_u32_offload *cls)
{
+ u32 hdl = cls->knode.handle;
u32 uhtid = TC_U32_USERHTID(cls->knode.handle);
- u32 loc;
- int err;
+ u32 loc = cls->knode.handle & 0xfffff;
+ int err = 0, i, j;
+ struct ixgbe_jump_table *jump = NULL;
+
+ if (loc > IXGBE_MAX_HW_ENTRIES)
+ return -EINVAL;
if ((uhtid != 0x800) && (uhtid >= IXGBE_MAX_LINK_HANDLE))
return -EINVAL;
- loc = cls->knode.handle & 0xfffff;
+ /* Clear this filter in the link data it is associated with */
+ if (uhtid != 0x800) {
+ jump = adapter->jump_tables[uhtid];
+ if (!jump)
+ return -EINVAL;
+ if (!test_bit(loc - 1, jump->child_loc_map))
+ return -EINVAL;
+ clear_bit(loc - 1, jump->child_loc_map);
+ }
+
+ /* Check if the filter being deleted is a link */
+ for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) {
+ jump = adapter->jump_tables[i];
+ if (jump && jump->link_hdl == hdl) {
+ /* Delete filters in the hardware in the child hash
+ * table associated with this link
+ */
+ for (j = 0; j < IXGBE_MAX_HW_ENTRIES; j++) {
+ if (!test_bit(j, jump->child_loc_map))
+ continue;
+ spin_lock(&adapter->fdir_perfect_lock);
+ err = ixgbe_update_ethtool_fdir_entry(adapter,
+ NULL,
+ j + 1);
+ spin_unlock(&adapter->fdir_perfect_lock);
+ clear_bit(j, jump->child_loc_map);
+ }
+ /* Remove resources for this link */
+ kfree(jump->input);
+ kfree(jump->mask);
+ kfree(jump);
+ adapter->jump_tables[i] = NULL;
+ return err;
+ }
+ }
spin_lock(&adapter->fdir_perfect_lock);
err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, loc);
@@ -8238,6 +8352,136 @@ static int ixgbe_configure_clsu32_del_hnode(struct ixgbe_adapter *adapter,
return 0;
}
+#ifdef CONFIG_NET_CLS_ACT
+static int handle_redirect_action(struct ixgbe_adapter *adapter, int ifindex,
+ u8 *queue, u64 *action)
+{
+ unsigned int num_vfs = adapter->num_vfs, vf;
+ struct net_device *upper;
+ struct list_head *iter;
+
+ /* redirect to a SRIOV VF */
+ for (vf = 0; vf < num_vfs; ++vf) {
+ upper = pci_get_drvdata(adapter->vfinfo[vf].vfdev);
+ if (upper->ifindex == ifindex) {
+ if (adapter->num_rx_pools > 1)
+ *queue = vf * 2;
+ else
+ *queue = vf * adapter->num_rx_queues_per_pool;
+
+ *action = vf + 1;
+ *action <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+ return 0;
+ }
+ }
+
+	/* redirect to an offloaded macvlan netdev */
+ netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) {
+ if (netif_is_macvlan(upper)) {
+ struct macvlan_dev *dfwd = netdev_priv(upper);
+ struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv;
+
+ if (vadapter && vadapter->netdev->ifindex == ifindex) {
+ *queue = adapter->rx_ring[vadapter->rx_base_queue]->reg_idx;
+ *action = *queue;
+ return 0;
+ }
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int parse_tc_actions(struct ixgbe_adapter *adapter,
+ struct tcf_exts *exts, u64 *action, u8 *queue)
+{
+ const struct tc_action *a;
+ LIST_HEAD(actions);
+ int err;
+
+ if (tc_no_actions(exts))
+ return -EINVAL;
+
+ tcf_exts_to_list(exts, &actions);
+ list_for_each_entry(a, &actions, list) {
+
+ /* Drop action */
+ if (is_tcf_gact_shot(a)) {
+ *action = IXGBE_FDIR_DROP_QUEUE;
+ *queue = IXGBE_FDIR_DROP_QUEUE;
+ return 0;
+ }
+
+		/* Redirect to a VF or an offloaded macvlan */
+ if (is_tcf_mirred_redirect(a)) {
+ int ifindex = tcf_mirred_ifindex(a);
+
+ err = handle_redirect_action(adapter, ifindex, queue,
+ action);
+ if (err == 0)
+ return err;
+ }
+ }
+
+ return -EINVAL;
+}
+#else
+static int parse_tc_actions(struct ixgbe_adapter *adapter,
+ struct tcf_exts *exts, u64 *action, u8 *queue)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_NET_CLS_ACT */
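With parse_tc_actions() in place, offloaded u32 filters can carry mirred redirects in addition to drops. As an illustration (interface names hypothetical, standard ingress-qdisc syntax): after tc qdisc add dev eth0 ingress, a rule like tc filter add dev eth0 parent ffff: protocol ip u32 match ip dst 10.0.0.1/32 action mirred egress redirect dev macvlan0 resolves through handle_redirect_action() to the queue backing the VF or offloaded macvlan, while action drop still maps to IXGBE_FDIR_DROP_QUEUE.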
+
+static int ixgbe_clsu32_build_input(struct ixgbe_fdir_filter *input,
+ union ixgbe_atr_input *mask,
+ struct tc_cls_u32_offload *cls,
+ struct ixgbe_mat_field *field_ptr,
+ struct ixgbe_nexthdr *nexthdr)
+{
+ int i, j, off;
+ __be32 val, m;
+ bool found_entry = false, found_jump_field = false;
+
+ for (i = 0; i < cls->knode.sel->nkeys; i++) {
+ off = cls->knode.sel->keys[i].off;
+ val = cls->knode.sel->keys[i].val;
+ m = cls->knode.sel->keys[i].mask;
+
+ for (j = 0; field_ptr[j].val; j++) {
+ if (field_ptr[j].off == off) {
+ field_ptr[j].val(input, mask, val, m);
+ input->filter.formatted.flow_type |=
+ field_ptr[j].type;
+ found_entry = true;
+ break;
+ }
+ }
+ if (nexthdr) {
+ if (nexthdr->off == cls->knode.sel->keys[i].off &&
+ nexthdr->val == cls->knode.sel->keys[i].val &&
+ nexthdr->mask == cls->knode.sel->keys[i].mask)
+ found_jump_field = true;
+ else
+ continue;
+ }
+ }
+
+ if (nexthdr && !found_jump_field)
+ return -EINVAL;
+
+ if (!found_entry)
+ return 0;
+
+ mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
+ IXGBE_ATR_L4TYPE_MASK;
+
+ if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
+ mask->formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
+
+ return 0;
+}
+
static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
__be16 protocol,
struct tc_cls_u32_offload *cls)
@@ -8245,16 +8489,13 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
u32 loc = cls->knode.handle & 0xfffff;
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_mat_field *field_ptr;
- struct ixgbe_fdir_filter *input;
- union ixgbe_atr_input mask;
-#ifdef CONFIG_NET_CLS_ACT
- const struct tc_action *a;
-#endif
- int i, err = 0;
+ struct ixgbe_fdir_filter *input = NULL;
+ union ixgbe_atr_input *mask = NULL;
+ struct ixgbe_jump_table *jump = NULL;
+ int i, err = -EINVAL;
u8 queue;
u32 uhtid, link_uhtid;
- memset(&mask, 0, sizeof(union ixgbe_atr_input));
uhtid = TC_U32_USERHTID(cls->knode.handle);
link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
@@ -8266,38 +8507,11 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
* headers when needed.
*/
if (protocol != htons(ETH_P_IP))
- return -EINVAL;
-
- if (link_uhtid) {
- struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps;
-
- if (link_uhtid >= IXGBE_MAX_LINK_HANDLE)
- return -EINVAL;
-
- if (!test_bit(link_uhtid - 1, &adapter->tables))
- return -EINVAL;
-
- for (i = 0; nexthdr[i].jump; i++) {
- if (nexthdr->o != cls->knode.sel->offoff ||
- nexthdr->s != cls->knode.sel->offshift ||
- nexthdr->m != cls->knode.sel->offmask ||
- /* do not support multiple key jumps its just mad */
- cls->knode.sel->nkeys > 1)
- return -EINVAL;
-
- if (nexthdr->off != cls->knode.sel->keys[0].off ||
- nexthdr->val != cls->knode.sel->keys[0].val ||
- nexthdr->mask != cls->knode.sel->keys[0].mask)
- return -EINVAL;
-
- adapter->jump_tables[link_uhtid] = nexthdr->jump;
- }
- return 0;
- }
+ return err;
if (loc >= ((1024 << adapter->fdir_pballoc) - 2)) {
e_err(drv, "Location out of range\n");
- return -EINVAL;
+ return err;
}
/* cls u32 is a graph starting at root node 0x800. The driver tracks
@@ -8308,87 +8522,154 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
 * this function _should_ be generic; try not to hardcode values here.
*/
if (uhtid == 0x800) {
- field_ptr = adapter->jump_tables[0];
+ field_ptr = (adapter->jump_tables[0])->mat;
} else {
if (uhtid >= IXGBE_MAX_LINK_HANDLE)
- return -EINVAL;
-
- field_ptr = adapter->jump_tables[uhtid];
+ return err;
+ if (!adapter->jump_tables[uhtid])
+ return err;
+ field_ptr = (adapter->jump_tables[uhtid])->mat;
}
if (!field_ptr)
- return -EINVAL;
+ return err;
- input = kzalloc(sizeof(*input), GFP_KERNEL);
- if (!input)
- return -ENOMEM;
+	/* At this point we know the field_ptr is valid and need to either
+	 * build a cls_u32 link or attach a filter. Adding a link to a
+	 * handle that does not exist is invalid, as is adding rules to a
+	 * handle that does not exist.
+	 */
- for (i = 0; i < cls->knode.sel->nkeys; i++) {
- int off = cls->knode.sel->keys[i].off;
- __be32 val = cls->knode.sel->keys[i].val;
- __be32 m = cls->knode.sel->keys[i].mask;
- bool found_entry = false;
- int j;
+ if (link_uhtid) {
+ struct ixgbe_nexthdr *nexthdr = ixgbe_ipv4_jumps;
- for (j = 0; field_ptr[j].val; j++) {
- if (field_ptr[j].off == off) {
- field_ptr[j].val(input, &mask, val, m);
- input->filter.formatted.flow_type |=
- field_ptr[j].type;
- found_entry = true;
+ if (link_uhtid >= IXGBE_MAX_LINK_HANDLE)
+ return err;
+
+ if (!test_bit(link_uhtid - 1, &adapter->tables))
+ return err;
+
+ /* Multiple filters as links to the same hash table are not
+ * supported. To add a new filter with the same next header
+ * but different match/jump conditions, create a new hash table
+ * and link to it.
+ */
+ if (adapter->jump_tables[link_uhtid] &&
+ (adapter->jump_tables[link_uhtid])->link_hdl) {
+ e_err(drv, "Link filter exists for link: %x\n",
+ link_uhtid);
+ return err;
+ }
+
+ for (i = 0; nexthdr[i].jump; i++) {
+ if (nexthdr[i].o != cls->knode.sel->offoff ||
+ nexthdr[i].s != cls->knode.sel->offshift ||
+ nexthdr[i].m != cls->knode.sel->offmask)
+ return err;
+
+ jump = kzalloc(sizeof(*jump), GFP_KERNEL);
+ if (!jump)
+ return -ENOMEM;
+ input = kzalloc(sizeof(*input), GFP_KERNEL);
+ if (!input) {
+ err = -ENOMEM;
+ goto free_jump;
+ }
+ mask = kzalloc(sizeof(*mask), GFP_KERNEL);
+ if (!mask) {
+ err = -ENOMEM;
+ goto free_input;
+ }
+ jump->input = input;
+ jump->mask = mask;
+ jump->link_hdl = cls->knode.handle;
+
+ err = ixgbe_clsu32_build_input(input, mask, cls,
+ field_ptr, &nexthdr[i]);
+ if (!err) {
+ jump->mat = nexthdr[i].jump;
+ adapter->jump_tables[link_uhtid] = jump;
break;
}
}
-
- if (!found_entry)
- goto err_out;
+ return 0;
}
- mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
- IXGBE_ATR_L4TYPE_MASK;
+ input = kzalloc(sizeof(*input), GFP_KERNEL);
+ if (!input)
+ return -ENOMEM;
+ mask = kzalloc(sizeof(*mask), GFP_KERNEL);
+ if (!mask) {
+ err = -ENOMEM;
+ goto free_input;
+ }
- if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
- mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;
+ if ((uhtid != 0x800) && (adapter->jump_tables[uhtid])) {
+ if ((adapter->jump_tables[uhtid])->input)
+ memcpy(input, (adapter->jump_tables[uhtid])->input,
+ sizeof(*input));
+ if ((adapter->jump_tables[uhtid])->mask)
+ memcpy(mask, (adapter->jump_tables[uhtid])->mask,
+ sizeof(*mask));
-#ifdef CONFIG_NET_CLS_ACT
- if (list_empty(&cls->knode.exts->actions))
+	/* Look up in all child hash tables whether this location is
+	 * already filled with a filter
+	 */
+ for (i = 1; i < IXGBE_MAX_LINK_HANDLE; i++) {
+ struct ixgbe_jump_table *link = adapter->jump_tables[i];
+
+ if (link && (test_bit(loc - 1, link->child_loc_map))) {
+ e_err(drv, "Filter exists in location: %x\n",
+ loc);
+ err = -EINVAL;
+ goto err_out;
+ }
+ }
+ }
+ err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL);
+ if (err)
goto err_out;
- list_for_each_entry(a, &cls->knode.exts->actions, list) {
- if (!is_tcf_gact_shot(a))
- goto err_out;
- }
-#endif
+ err = parse_tc_actions(adapter, cls->knode.exts, &input->action,
+ &queue);
+ if (err < 0)
+ goto err_out;
- input->action = IXGBE_FDIR_DROP_QUEUE;
- queue = IXGBE_FDIR_DROP_QUEUE;
input->sw_idx = loc;
spin_lock(&adapter->fdir_perfect_lock);
if (hlist_empty(&adapter->fdir_filter_list)) {
- memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
- err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
+ memcpy(&adapter->fdir_mask, mask, sizeof(*mask));
+ err = ixgbe_fdir_set_input_mask_82599(hw, mask);
if (err)
goto err_out_w_lock;
- } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
+ } else if (memcmp(&adapter->fdir_mask, mask, sizeof(*mask))) {
err = -EINVAL;
goto err_out_w_lock;
}
- ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);
+ ixgbe_atr_compute_perfect_hash_82599(&input->filter, mask);
err = ixgbe_fdir_write_perfect_filter_82599(hw, &input->filter,
input->sw_idx, queue);
if (!err)
ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);
spin_unlock(&adapter->fdir_perfect_lock);
+ if ((uhtid != 0x800) && (adapter->jump_tables[uhtid]))
+ set_bit(loc - 1, (adapter->jump_tables[uhtid])->child_loc_map);
+
+ kfree(mask);
return err;
err_out_w_lock:
spin_unlock(&adapter->fdir_perfect_lock);
err_out:
+ kfree(mask);
+free_input:
kfree(input);
- return -EINVAL;
+free_jump:
+ kfree(jump);
+ return err;
}
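All of this bookkeeping hangs off the per-link jump table entries. For orientation, the structure this series adds in ixgbe_model.h looks roughly like:

	struct ixgbe_jump_table {
		struct ixgbe_mat_field *mat;	/* match-field parsers for this table */
		struct ixgbe_fdir_filter *input;	/* filter state seeded by the link */
		union ixgbe_atr_input *mask;	/* mask accumulated from the link */
		u32 link_hdl;			/* handle of the linking filter */
		unsigned long child_loc_map[32];	/* occupied child locations */
	};

A rule added under a linked handle inherits input/mask from its table and sets its bit in child_loc_map; deleting the link (see ixgbe_delete_clsu32() above) tears down every child still marked there.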
static int __ixgbe_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
@@ -8515,48 +8796,46 @@ static int ixgbe_set_features(struct net_device *netdev,
adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
}
- if (features & NETIF_F_HW_VLAN_CTAG_RX)
- ixgbe_vlan_strip_enable(adapter);
- else
- ixgbe_vlan_strip_disable(adapter);
-
if (changed & NETIF_F_RXALL)
need_reset = true;
netdev->features = features;
-#ifdef CONFIG_IXGBE_VXLAN
if ((adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) {
if (features & NETIF_F_RXCSUM)
adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED;
else
ixgbe_clear_vxlan_port(adapter);
}
-#endif /* CONFIG_IXGBE_VXLAN */
if (need_reset)
ixgbe_do_reset(netdev);
+ else if (changed & (NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_FILTER))
+ ixgbe_set_rx_mode(netdev);
return 0;
}
-#ifdef CONFIG_IXGBE_VXLAN
/**
* ixgbe_add_vxlan_port - Get notifications about VXLAN ports that come up
* @dev: The port's netdev
- * @sa_family: Socket Family that VXLAN is notifiying us about
- * @port: New UDP port number that VXLAN started listening to
+ * @ti: Tunnel endpoint information
**/
-static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
- __be16 port)
+static void ixgbe_add_vxlan_port(struct net_device *dev,
+ struct udp_tunnel_info *ti)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
struct ixgbe_hw *hw = &adapter->hw;
+ __be16 port = ti->port;
- if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
return;
- if (sa_family == AF_INET6)
+ if (ti->sa_family != AF_INET)
+ return;
+
+ if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
return;
if (adapter->vxlan_port == port)
@@ -8576,30 +8855,31 @@ static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
/**
* ixgbe_del_vxlan_port - Get notifications about VXLAN ports that go away
* @dev: The port's netdev
- * @sa_family: Socket Family that VXLAN is notifying us about
- * @port: UDP port number that VXLAN stopped listening to
+ * @ti: Tunnel endpoint information
**/
-static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
- __be16 port)
+static void ixgbe_del_vxlan_port(struct net_device *dev,
+ struct udp_tunnel_info *ti)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
- if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
+
+ if (ti->sa_family != AF_INET)
return;
- if (sa_family == AF_INET6)
+ if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE))
return;
- if (adapter->vxlan_port != port) {
+ if (adapter->vxlan_port != ti->port) {
netdev_info(dev, "Port %d was not found, not deleting\n",
- ntohs(port));
+ ntohs(ti->port));
return;
}
ixgbe_clear_vxlan_port(adapter);
adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED;
}
-#endif /* CONFIG_IXGBE_VXLAN */
static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev,
@@ -8833,17 +9113,36 @@ static void ixgbe_fwd_del(struct net_device *pdev, void *priv)
kfree(fwd_adapter);
}
-#define IXGBE_MAX_TUNNEL_HDR_LEN 80
+#define IXGBE_MAX_MAC_HDR_LEN 127
+#define IXGBE_MAX_NETWORK_HDR_LEN 511
+
static netdev_features_t
ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
netdev_features_t features)
{
- if (!skb->encapsulation)
- return features;
-
- if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) >
- IXGBE_MAX_TUNNEL_HDR_LEN))
- return features & ~NETIF_F_CSUM_MASK;
+ unsigned int network_hdr_len, mac_hdr_len;
+
+ /* Make certain the headers can be described by a context descriptor */
+ mac_hdr_len = skb_network_header(skb) - skb->data;
+ if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN))
+ return features & ~(NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+
+ network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
+ if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN))
+ return features & ~(NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+
+ /* We can only support IPV4 TSO in tunnels if we can mangle the
+ * inner IP ID field, so strip TSO if MANGLEID is not supported.
+ */
+ if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
+ features &= ~NETIF_F_TSO;
return features;
}
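The new limits mirror what a single Tx context descriptor can describe: MACLEN is a 7-bit field (hence 127) and the network header length a 9-bit one (hence 511). A worked example for a VXLAN-encapsulated IPv4/TCP frame, where skb_checksum_start() points at the inner TCP header:

	mac_hdr_len = skb_network_header(skb) - skb->data;
	/* outer Ethernet: 14, comfortably under 127 */

	network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
	/* outer IPv4 20 + UDP 8 + VXLAN 8 + inner Ethernet 14 + inner IPv4 20
	 * = 70, comfortably under 511, so offloads are retained */

Frames exceeding either bound lose checksum and TSO offloads but are still transmitted; tunnelled TSO additionally requires NETIF_F_TSO_MANGLEID because the hardware rewrites the inner IP ID.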
@@ -8858,6 +9157,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_set_mac_address = ixgbe_set_mac,
.ndo_change_mtu = ixgbe_change_mtu,
.ndo_tx_timeout = ixgbe_tx_timeout,
+ .ndo_set_tx_maxrate = ixgbe_tx_maxrate,
.ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid,
.ndo_do_ioctl = ixgbe_ioctl,
@@ -8892,10 +9192,8 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_bridge_getlink = ixgbe_ndo_bridge_getlink,
.ndo_dfwd_add_station = ixgbe_fwd_add,
.ndo_dfwd_del_station = ixgbe_fwd_del,
-#ifdef CONFIG_IXGBE_VXLAN
- .ndo_add_vxlan_port = ixgbe_add_vxlan_port,
- .ndo_del_vxlan_port = ixgbe_del_vxlan_port,
-#endif /* CONFIG_IXGBE_VXLAN */
+ .ndo_udp_tunnel_add = ixgbe_add_vxlan_port,
+ .ndo_udp_tunnel_del = ixgbe_del_vxlan_port,
.ndo_features_check = ixgbe_features_check,
};
@@ -8943,7 +9241,7 @@ static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
/**
* ixgbe_wol_supported - Check whether device supports WoL
- * @hw: hw specific details
+ * @adapter: the adapter private structure
* @device_id: the device ID
* @subdev_id: the subsystem device ID
*
@@ -8951,19 +9249,33 @@ static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
* which devices have WoL support
*
**/
-int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
- u16 subdevice_id)
+bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
+ u16 subdevice_id)
{
struct ixgbe_hw *hw = &adapter->hw;
u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;
- int is_wol_supported = 0;
+ /* WOL not supported on 82598 */
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return false;
+
+ /* check eeprom to see if WOL is enabled for X540 and newer */
+ if (hw->mac.type >= ixgbe_mac_X540) {
+ if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
+ ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
+ (hw->bus.func == 0)))
+ return true;
+ }
+
+ /* WOL is determined based on device IDs for 82599 MACs */
switch (device_id) {
case IXGBE_DEV_ID_82599_SFP:
		/* Only these subdevices can support WOL */
switch (subdevice_id) {
- case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
case IXGBE_SUBDEV_ID_82599_560FLR:
+ case IXGBE_SUBDEV_ID_82599_LOM_SNAP6:
+ case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
+ case IXGBE_SUBDEV_ID_82599_SFP_2OCP:
/* only support first port */
if (hw->bus.func != 0)
break;
@@ -8971,66 +9283,31 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
case IXGBE_SUBDEV_ID_82599_SFP:
case IXGBE_SUBDEV_ID_82599_RNDC:
case IXGBE_SUBDEV_ID_82599_ECNA_DP:
- case IXGBE_SUBDEV_ID_82599_LOM_SFP:
- is_wol_supported = 1;
- break;
+ case IXGBE_SUBDEV_ID_82599_SFP_1OCP:
+ case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1:
+ case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2:
+ return true;
}
break;
case IXGBE_DEV_ID_82599EN_SFP:
- /* Only this subdevice supports WOL */
+ /* Only these subdevices support WOL */
switch (subdevice_id) {
case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1:
- is_wol_supported = 1;
- break;
+ return true;
}
break;
case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
/* All except this subdevice support WOL */
if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
- is_wol_supported = 1;
+ return true;
break;
case IXGBE_DEV_ID_82599_KX4:
- is_wol_supported = 1;
- break;
- case IXGBE_DEV_ID_X540T:
- case IXGBE_DEV_ID_X540T1:
- case IXGBE_DEV_ID_X550T:
- case IXGBE_DEV_ID_X550EM_X_KX4:
- case IXGBE_DEV_ID_X550EM_X_KR:
- case IXGBE_DEV_ID_X550EM_X_10G_T:
- /* check eeprom to see if enabled wol */
- if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
- ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
- (hw->bus.func == 0))) {
- is_wol_supported = 1;
- }
+ return true;
+ default:
break;
}
- return is_wol_supported;
-}
-
-/**
- * ixgbe_get_platform_mac_addr - Look up MAC address in Open Firmware / IDPROM
- * @adapter: Pointer to adapter struct
- */
-static void ixgbe_get_platform_mac_addr(struct ixgbe_adapter *adapter)
-{
-#ifdef CONFIG_OF
- struct device_node *dp = pci_device_to_OF_node(adapter->pdev);
- struct ixgbe_hw *hw = &adapter->hw;
- const unsigned char *addr;
-
- addr = of_get_mac_address(dp);
- if (addr) {
- ether_addr_copy(hw->mac.perm_addr, addr);
- return;
- }
-#endif /* CONFIG_OF */
-
-#ifdef CONFIG_SPARC
- ether_addr_copy(hw->mac.perm_addr, idprom->id_ethaddr);
-#endif /* CONFIG_SPARC */
+ return false;
}
/**
@@ -9084,8 +9361,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_using_dac = 0;
}
- err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
- IORESOURCE_MEM), ixgbe_driver_name);
+ err = pci_request_mem_regions(pdev, ixgbe_driver_name);
if (err) {
dev_err(&pdev->dev,
"pci_request_selected_regions failed 0x%x\n", err);
@@ -9136,23 +9412,23 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
/* Setup hw api */
- memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
+ hw->mac.ops = *ii->mac_ops;
hw->mac.type = ii->mac;
hw->mvals = ii->mvals;
/* EEPROM */
- memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
+ hw->eeprom.ops = *ii->eeprom_ops;
eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
if (ixgbe_removed(hw->hw_addr)) {
err = -EIO;
goto err_ioremap;
}
/* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
- if (!(eec & (1 << 8)))
+ if (!(eec & BIT(8)))
hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
/* PHY */
- memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
+ hw->phy.ops = *ii->phy_ops;
hw->phy.sfp_type = ixgbe_sfp_type_unknown;
/* ixgbe_identify_phy_generic will set prtad and mmds properly */
hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
@@ -9169,12 +9445,17 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto err_sw_init;
+ /* Make sure the SWFW semaphore is in a valid state */
+ if (hw->mac.ops.init_swfw_sync)
+ hw->mac.ops.init_swfw_sync(hw);
+
	/* Make it possible for the adapter to be woken up via WOL */
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
break;
default:
@@ -9215,54 +9496,63 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto skip_sriov;
/* Mailbox */
ixgbe_init_mbx_params_pf(hw);
- memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));
+ hw->mbx.ops = ii->mbx_ops;
pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT);
ixgbe_enable_sriov(adapter);
skip_sriov:
#endif
netdev->features = NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_TSO |
NETIF_F_TSO6 |
NETIF_F_RXHASH |
- NETIF_F_RXCSUM;
+ NETIF_F_RXCSUM |
+ NETIF_F_HW_CSUM;
- netdev->hw_features = netdev->features | NETIF_F_HW_L2FW_DOFFLOAD;
+#define IXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+ NETIF_F_GSO_GRE_CSUM | \
+ NETIF_F_GSO_IPXIP4 | \
+ NETIF_F_GSO_IPXIP6 | \
+ NETIF_F_GSO_UDP_TUNNEL | \
+ NETIF_F_GSO_UDP_TUNNEL_CSUM)
- switch (adapter->hw.mac.type) {
- case ixgbe_mac_82599EB:
- case ixgbe_mac_X540:
- case ixgbe_mac_X550:
- case ixgbe_mac_X550EM_x:
+ netdev->gso_partial_features = IXGBE_GSO_PARTIAL_FEATURES;
+ netdev->features |= NETIF_F_GSO_PARTIAL |
+ IXGBE_GSO_PARTIAL_FEATURES;
+
+ if (hw->mac.type >= ixgbe_mac_82599EB)
netdev->features |= NETIF_F_SCTP_CRC;
- netdev->hw_features |= NETIF_F_SCTP_CRC |
- NETIF_F_NTUPLE |
+
+ /* copy netdev features into list of user selectable features */
+ netdev->hw_features |= netdev->features |
+ NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_RXALL |
+ NETIF_F_HW_L2FW_DOFFLOAD;
+
+ if (hw->mac.type >= ixgbe_mac_82599EB)
+ netdev->hw_features |= NETIF_F_NTUPLE |
NETIF_F_HW_TC;
- break;
- default:
- break;
- }
- netdev->hw_features |= NETIF_F_RXALL;
- netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ if (pci_using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
- netdev->vlan_features |= NETIF_F_TSO;
- netdev->vlan_features |= NETIF_F_TSO6;
- netdev->vlan_features |= NETIF_F_IP_CSUM;
- netdev->vlan_features |= NETIF_F_IPV6_CSUM;
- netdev->vlan_features |= NETIF_F_SG;
+ netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
+ netdev->hw_enc_features |= netdev->vlan_features;
+ netdev->mpls_features |= NETIF_F_HW_CSUM;
- netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ /* set this bit last since it cannot be part of vlan_features */
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX;
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->priv_flags |= IFF_SUPP_NOFCS;
#ifdef CONFIG_IXGBE_DCB
- netdev->dcbnl_ops = &dcbnl_ops;
+ if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE)
+ netdev->dcbnl_ops = &dcbnl_ops;
#endif
#ifdef IXGBE_FCOE
@@ -9287,10 +9577,6 @@ skip_sriov:
NETIF_F_FCOE_MTU;
}
#endif /* IXGBE_FCOE */
- if (pci_using_dac) {
- netdev->features |= NETIF_F_HIGHDMA;
- netdev->vlan_features |= NETIF_F_HIGHDMA;
- }
if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
netdev->hw_features |= NETIF_F_LRO;
@@ -9304,7 +9590,8 @@ skip_sriov:
goto err_sw_init;
}
- ixgbe_get_platform_mac_addr(adapter);
+ eth_platform_get_mac_address(&adapter->pdev->dev,
+ adapter->hw.mac.perm_addr);
memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
@@ -9455,13 +9742,13 @@ err_sw_init:
ixgbe_disable_sriov(adapter);
adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
iounmap(adapter->io_addr);
+ kfree(adapter->jump_tables[0]);
kfree(adapter->mac_table);
err_ioremap:
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
err_alloc_etherdev:
- pci_release_selected_regions(pdev,
- pci_select_bars(pdev, IORESOURCE_MEM));
+ pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
if (!adapter || disable_dev)
@@ -9483,6 +9770,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev;
bool disable_dev;
+ int i;
/* if !adapter then we already cleaned up in probe */
if (!adapter)
@@ -9527,11 +9815,18 @@ static void ixgbe_remove(struct pci_dev *pdev)
#endif
iounmap(adapter->io_addr);
- pci_release_selected_regions(pdev, pci_select_bars(pdev,
- IORESOURCE_MEM));
+ pci_release_mem_regions(pdev);
e_dev_info("complete\n");
+ for (i = 0; i < IXGBE_MAX_LINK_HANDLE; i++) {
+ if (adapter->jump_tables[i]) {
+ kfree(adapter->jump_tables[i]->input);
+ kfree(adapter->jump_tables[i]->mask);
+ }
+ kfree(adapter->jump_tables[i]);
+ }
+
kfree(adapter->mac_table);
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
@@ -9612,6 +9907,9 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
case ixgbe_mac_X550EM_x:
device_id = IXGBE_DEV_ID_X550EM_X_VF;
break;
+ case ixgbe_mac_x550em_a:
+ device_id = IXGBE_DEV_ID_X550EM_A_VF;
+ break;
default:
device_id = 0;
break;
@@ -9781,6 +10079,7 @@ static int __init ixgbe_init_module(void)
ret = pci_register_driver(&ixgbe_driver);
if (ret) {
+ destroy_workqueue(ixgbe_wq);
ixgbe_dbg_exit();
return ret;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index 9993a471d668..a0cb84381cd0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2014 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -48,10 +48,10 @@ s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
if (size > mbx->size)
size = mbx->size;
- if (!mbx->ops.read)
+ if (!mbx->ops)
return IXGBE_ERR_MBX;
- return mbx->ops.read(hw, msg, size, mbx_id);
+ return mbx->ops->read(hw, msg, size, mbx_id);
}
/**
@@ -70,10 +70,10 @@ s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
if (size > mbx->size)
return IXGBE_ERR_MBX;
- if (!mbx->ops.write)
+ if (!mbx->ops)
return IXGBE_ERR_MBX;
- return mbx->ops.write(hw, msg, size, mbx_id);
+ return mbx->ops->write(hw, msg, size, mbx_id);
}
/**
@@ -87,10 +87,10 @@ s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- if (!mbx->ops.check_for_msg)
+ if (!mbx->ops)
return IXGBE_ERR_MBX;
- return mbx->ops.check_for_msg(hw, mbx_id);
+ return mbx->ops->check_for_msg(hw, mbx_id);
}
/**
@@ -104,10 +104,10 @@ s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- if (!mbx->ops.check_for_ack)
+ if (!mbx->ops)
return IXGBE_ERR_MBX;
- return mbx->ops.check_for_ack(hw, mbx_id);
+ return mbx->ops->check_for_ack(hw, mbx_id);
}
/**
@@ -121,10 +121,10 @@ s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- if (!mbx->ops.check_for_rst)
+ if (!mbx->ops)
return IXGBE_ERR_MBX;
- return mbx->ops.check_for_rst(hw, mbx_id);
+ return mbx->ops->check_for_rst(hw, mbx_id);
}
/**
@@ -139,10 +139,10 @@ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
struct ixgbe_mbx_info *mbx = &hw->mbx;
int countdown = mbx->timeout;
- if (!countdown || !mbx->ops.check_for_msg)
+ if (!countdown || !mbx->ops)
return IXGBE_ERR_MBX;
- while (mbx->ops.check_for_msg(hw, mbx_id)) {
+ while (mbx->ops->check_for_msg(hw, mbx_id)) {
countdown--;
if (!countdown)
return IXGBE_ERR_MBX;
@@ -164,10 +164,10 @@ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
struct ixgbe_mbx_info *mbx = &hw->mbx;
int countdown = mbx->timeout;
- if (!countdown || !mbx->ops.check_for_ack)
+ if (!countdown || !mbx->ops)
return IXGBE_ERR_MBX;
- while (mbx->ops.check_for_ack(hw, mbx_id)) {
+ while (mbx->ops->check_for_ack(hw, mbx_id)) {
countdown--;
if (!countdown)
return IXGBE_ERR_MBX;
@@ -193,7 +193,7 @@ static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
struct ixgbe_mbx_info *mbx = &hw->mbx;
s32 ret_val;
- if (!mbx->ops.read)
+ if (!mbx->ops)
return IXGBE_ERR_MBX;
ret_val = ixgbe_poll_for_msg(hw, mbx_id);
@@ -201,7 +201,7 @@ static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
return ret_val;
/* if ack received read message */
- return mbx->ops.read(hw, msg, size, mbx_id);
+ return mbx->ops->read(hw, msg, size, mbx_id);
}
/**
@@ -221,11 +221,11 @@ static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
s32 ret_val;
/* exit if either we can't write or there isn't a defined timeout */
- if (!mbx->ops.write || !mbx->timeout)
+ if (!mbx->ops || !mbx->timeout)
return IXGBE_ERR_MBX;
/* send msg */
- ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+ ret_val = mbx->ops->write(hw, msg, size, mbx_id);
if (ret_val)
return ret_val;
@@ -307,14 +307,15 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
case ixgbe_mac_X540:
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
break;
default:
break;
}
- if (vflre & (1 << vf_shift)) {
- IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
+ if (vflre & BIT(vf_shift)) {
+ IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), BIT(vf_shift));
hw->mbx.stats.rsts++;
return 0;
}
@@ -430,6 +431,7 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
if (hw->mac.type != ixgbe_mac_82599EB &&
hw->mac.type != ixgbe_mac_X550 &&
hw->mac.type != ixgbe_mac_X550EM_x &&
+ hw->mac.type != ixgbe_mac_x550em_a &&
hw->mac.type != ixgbe_mac_X540)
return;
@@ -446,7 +448,7 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
}
#endif /* CONFIG_PCI_IOV */
-struct ixgbe_mbx_operations mbx_ops_generic = {
+const struct ixgbe_mbx_operations mbx_ops_generic = {
.read = ixgbe_read_mbx_pf,
.write = ixgbe_write_mbx_pf,
.read_posted = ixgbe_read_posted_mbx,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index 8daa95f74548..01c2667c0f92 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -123,6 +123,6 @@ s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
#endif /* CONFIG_PCI_IOV */
-extern struct ixgbe_mbx_operations mbx_ops_generic;
+extern const struct ixgbe_mbx_operations mbx_ops_generic;
#endif /* _IXGBE_MBX_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
index 74c53ad9d268..538a1c5475b6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_model.h
@@ -38,6 +38,16 @@ struct ixgbe_mat_field {
unsigned int type;
};
+struct ixgbe_jump_table {
+ struct ixgbe_mat_field *mat;
+ struct ixgbe_fdir_filter *input;
+ union ixgbe_atr_input *mask;
+ u32 link_hdl;
+ unsigned long child_loc_map[32];
+};
+
+#define IXGBE_MAX_HW_ENTRIES 2045
+
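+/* child_loc_map is presumably used as a bitmap with one bit per
+ * hardware filter location (32 unsigned longs, sized against
+ * IXGBE_MAX_HW_ENTRIES) tracking the entries a jump rule's children
+ * occupy.
+ */
+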
static inline int ixgbe_mat_prgm_sip(struct ixgbe_fdir_filter *input,
union ixgbe_atr_input *mask,
u32 val, u32 m)
@@ -82,6 +92,12 @@ static struct ixgbe_mat_field ixgbe_tcp_fields[] = {
{ .val = NULL } /* terminal node */
};
+static struct ixgbe_mat_field ixgbe_udp_fields[] = {
+ {.off = 0, .val = ixgbe_mat_prgm_ports,
+ .type = IXGBE_ATR_FLOW_TYPE_UDPV4},
+ { .val = NULL } /* terminal node */
+};
+
struct ixgbe_nexthdr {
/* offset, shift, and mask of position to next header */
unsigned int o;
@@ -98,6 +114,8 @@ struct ixgbe_nexthdr {
static struct ixgbe_nexthdr ixgbe_ipv4_jumps[] = {
{ .o = 0, .s = 6, .m = 0xf,
.off = 8, .val = 0x600, .mask = 0xff00, .jump = ixgbe_tcp_fields},
+ { .o = 0, .s = 6, .m = 0xf,
+ .off = 8, .val = 0x1100, .mask = 0xff00, .jump = ixgbe_udp_fields},
{ .jump = NULL } /* terminal node */
};
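+/* Both jump entries key on the IPv4 protocol field in the word at
+ * offset 8: 0x0600/0xff00 matches protocol 6 (TCP) and 0x1100/0xff00
+ * matches protocol 17 (UDP).
+ */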
#endif /* _IXGBE_MODEL_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 5abd66c84d00..cc735ec3e045 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2014 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -81,7 +81,11 @@
#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2
#define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3
#define IXGBE_CS4227 0xBE /* CS4227 address */
+#define IXGBE_CS4227_GLOBAL_ID_LSB 0
+#define IXGBE_CS4227_GLOBAL_ID_MSB 1
#define IXGBE_CS4227_SCRATCH 2
+#define IXGBE_CS4223_PHY_ID 0x7003 /* Quad port */
+#define IXGBE_CS4227_PHY_ID 0x3003 /* Dual port */
#define IXGBE_CS4227_RESET_PENDING 0x1357
#define IXGBE_CS4227_RESET_COMPLETE 0x5AA5
#define IXGBE_CS4227_RETRIES 15
@@ -103,7 +107,7 @@
#define IXGBE_PE 0xE0 /* Port expander addr */
#define IXGBE_PE_OUTPUT 1 /* Output reg offset */
#define IXGBE_PE_CONFIG 3 /* Config reg offset */
-#define IXGBE_PE_BIT1 (1 << 1)
+#define IXGBE_PE_BIT1 BIT(1)
/* Flow control defines */
#define IXGBE_TAF_SYM_PAUSE 0x400
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index ef1504d41890..e5431bfe3339 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2015 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -333,6 +333,7 @@ static void ixgbe_ptp_convert_to_hwtstamp(struct ixgbe_adapter *adapter,
*/
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
/* Upper 32 bits represent billions of cycles, lower 32 bits
* represent cycles. However, we use timespec64_to_ns for the
* correct math even though the units haven't been corrected
@@ -395,7 +396,7 @@ static int ixgbe_ptp_adjfreq_82599(struct ptp_clock_info *ptp, s32 ppb)
if (incval > 0x00FFFFFFULL)
e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n");
IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
- (1 << IXGBE_INCPER_SHIFT_82599) |
+ BIT(IXGBE_INCPER_SHIFT_82599) |
((u32)incval & 0x00FFFFFFUL));
break;
default:
@@ -921,6 +922,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
switch (hw->mac.type) {
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
/* enable timestamping all packets only if at least some
* packets were requested. Otherwise, play nice and disable
* timestamping
@@ -1083,6 +1085,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
cc.shift = 2;
}
/* fallthrough */
+ case ixgbe_mac_x550em_a:
case ixgbe_mac_X550:
cc.read = ixgbe_ptp_read_X550;
@@ -1111,7 +1114,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
incval >>= IXGBE_INCVAL_SHIFT_82599;
cc.shift -= IXGBE_INCVAL_SHIFT_82599;
IXGBE_WRITE_REG(hw, IXGBE_TIMINCA,
- (1 << IXGBE_INCPER_SHIFT_82599) | incval);
+ BIT(IXGBE_INCPER_SHIFT_82599) | incval);
break;
default:
/* other devices aren't supported */
@@ -1223,6 +1226,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
break;
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
+ case ixgbe_mac_x550em_a:
snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name);
adapter->ptp_caps.owner = THIS_MODULE;
adapter->ptp_caps.max_adj = 30000000;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 8025a3f93598..8618599dfd6f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -406,7 +406,7 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F;
vector_bit = vfinfo->vf_mc_hashes[i] & 0x1F;
mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
- mta_reg |= (1 << vector_bit);
+ mta_reg |= BIT(vector_bit);
IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
}
vmolr |= IXGBE_VMOLR_ROMPE;
@@ -433,7 +433,7 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F;
vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F;
mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
- mta_reg |= (1 << vector_bit);
+ mta_reg |= BIT(vector_bit);
IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
}
@@ -536,9 +536,9 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
/* enable or disable receive depending on error */
vfre = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
if (err)
- vfre &= ~(1 << vf_shift);
+ vfre &= ~BIT(vf_shift);
else
- vfre |= 1 << vf_shift;
+ vfre |= BIT(vf_shift);
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), vfre);
if (err) {
@@ -589,47 +589,47 @@ static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf)
static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 i;
+ u32 vlvfb_mask, pool_mask, i;
+
+ /* create mask for VF and other pools */
+ pool_mask = ~BIT(VMDQ_P(0) % 32);
+ vlvfb_mask = BIT(vf % 32);
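+ /* pool_mask clears the PF pool's bit so the "other pools present"
+ * check below ignores the PF; vlvfb_mask is this VF's own bit.
+ */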
/* post-decrement loop, covers VLVF_ENTRIES - 1 to 0 */
for (i = IXGBE_VLVF_ENTRIES; i--;) {
u32 bits[2], vlvfb, vid, vfta, vlvf;
u32 word = i * 2 + vf / 32;
- u32 mask = 1 << (vf % 32);
+ u32 mask;
vlvfb = IXGBE_READ_REG(hw, IXGBE_VLVFB(word));
/* if our bit isn't set we can skip it */
- if (!(vlvfb & mask))
+ if (!(vlvfb & vlvfb_mask))
continue;
/* clear our bit from vlvfb */
- vlvfb ^= mask;
+ vlvfb ^= vlvfb_mask;
/* create 64b mask to check whether we should clear VLVF */
bits[word % 2] = vlvfb;
bits[~word % 2] = IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1));
- /* if promisc is enabled, PF will be present, leave VFTA */
- if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC) {
- bits[VMDQ_P(0) / 32] &= ~(1 << (VMDQ_P(0) % 32));
-
- if (bits[0] || bits[1])
- goto update_vlvfb;
- goto update_vlvf;
- }
-
/* if other pools are present, just remove ourselves */
- if (bits[0] || bits[1])
+ if (bits[(VMDQ_P(0) / 32) ^ 1] ||
+ (bits[VMDQ_P(0) / 32] & pool_mask))
goto update_vlvfb;
+ /* if PF is present, leave VFTA */
+ if (bits[0] || bits[1])
+ goto update_vlvf;
+
/* if we cannot determine VLAN just remove ourselves */
vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i));
if (!vlvf)
goto update_vlvfb;
vid = vlvf & VLAN_VID_MASK;
- mask = 1 << (vid % 32);
+ mask = BIT(vid % 32);
/* clear bit from VFTA */
vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid / 32));
@@ -638,6 +638,9 @@ static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf)
update_vlvf:
/* clear POOL selection enable */
IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), 0);
+
+ if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC))
+ vlvfb = 0;
update_vlvfb:
/* clear pool bits */
IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), vlvfb);
@@ -810,7 +813,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
/* enable transmit for vf */
reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
- reg |= 1 << vf_shift;
+ reg |= BIT(vf_shift);
IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
/* force drop enable for all VF Rx queues */
@@ -818,7 +821,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
/* enable receive for vf */
reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
- reg |= 1 << vf_shift;
+ reg |= BIT(vf_shift);
/*
* The 82599 cannot support a mix of jumbo and non-jumbo PF/VFs.
* For more info take a look at ixgbe_set_vf_lpe
@@ -834,7 +837,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
#endif /* CONFIG_FCOE */
if (pf_max_frame > ETH_FRAME_LEN)
- reg &= ~(1 << vf_shift);
+ reg &= ~BIT(vf_shift);
}
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
@@ -843,7 +846,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
/* Enable counting of spoofed packets in the SSVPC register */
reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
- reg |= (1 << vf_shift);
+ reg |= BIT(vf_shift);
IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
/*
@@ -887,7 +890,7 @@ static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter,
return -1;
}
- if (adapter->vfinfo[vf].pf_set_mac &&
+ if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted &&
!ether_addr_equal(adapter->vfinfo[vf].vf_mac_addresses, new_mac)) {
e_warn(drv,
"VF %d attempted to override administratively set MAC address\n"
@@ -905,8 +908,6 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT;
u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
u8 tcs = netdev_get_num_tc(adapter->netdev);
- struct ixgbe_hw *hw = &adapter->hw;
- int err;
if (adapter->vfinfo[vf].pf_vlan || tcs) {
e_warn(drv,
@@ -920,19 +921,7 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
if (!vid && !add)
return 0;
- err = ixgbe_set_vf_vlan(adapter, add, vid, vf);
- if (err)
- return err;
-
- if (adapter->vfinfo[vf].spoofchk_enabled)
- hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
-
- if (add)
- adapter->vfinfo[vf].vlan_count++;
- else if (adapter->vfinfo[vf].vlan_count)
- adapter->vfinfo[vf].vlan_count--;
-
- return 0;
+ return ixgbe_set_vf_vlan(adapter, add, vid, vf);
}
static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
@@ -961,8 +950,12 @@ static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter,
* If the VF is allowed to set MAC filters then turn off
* anti-spoofing to avoid false positives.
*/
- if (adapter->vfinfo[vf].spoofchk_enabled)
- ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false);
+ if (adapter->vfinfo[vf].spoofchk_enabled) {
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ hw->mac.ops.set_mac_anti_spoofing(hw, false, vf);
+ hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
+ }
}
err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac);
@@ -1318,9 +1311,6 @@ static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, int vf,
ixgbe_set_vmvir(adapter, vlan, qos, vf);
ixgbe_set_vmolr(hw, vf, false);
- if (adapter->vfinfo[vf].spoofchk_enabled)
- hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
- adapter->vfinfo[vf].vlan_count++;
/* enable hide vlan on X550 */
if (hw->mac.type >= ixgbe_mac_X550)
@@ -1353,9 +1343,6 @@ static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf)
ixgbe_set_vf_vlan(adapter, true, 0, vf);
ixgbe_clear_vmvir(adapter, vf);
ixgbe_set_vmolr(hw, vf, true);
- hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
- if (adapter->vfinfo[vf].vlan_count)
- adapter->vfinfo[vf].vlan_count--;
/* disable hide VLAN on X550 */
if (hw->mac.type >= ixgbe_mac_X550)
@@ -1395,7 +1382,7 @@ out:
return err;
}
-static int ixgbe_link_mbps(struct ixgbe_adapter *adapter)
+int ixgbe_link_mbps(struct ixgbe_adapter *adapter)
{
switch (adapter->link_speed) {
case IXGBE_LINK_SPEED_100_FULL:
@@ -1522,27 +1509,34 @@ int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- int vf_target_reg = vf >> 3;
- int vf_target_shift = vf % 8;
struct ixgbe_hw *hw = &adapter->hw;
- u32 regval;
if (vf >= adapter->num_vfs)
return -EINVAL;
adapter->vfinfo[vf].spoofchk_enabled = setting;
- regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
- regval &= ~(1 << vf_target_shift);
- regval |= (setting << vf_target_shift);
- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval);
-
- if (adapter->vfinfo[vf].vlan_count) {
- vf_target_shift += IXGBE_SPOOF_VLANAS_SHIFT;
- regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
- regval &= ~(1 << vf_target_shift);
- regval |= (setting << vf_target_shift);
- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval);
+ /* configure MAC spoofing */
+ hw->mac.ops.set_mac_anti_spoofing(hw, setting, vf);
+
+ /* configure VLAN spoofing */
+ hw->mac.ops.set_vlan_anti_spoofing(hw, setting, vf);
+
+ /* Ensure the LLDP and FC EtherType filters are set up for EtherType
+ * anti-spoofing before set_ethertype_anti_spoofing is called below
+ */
+ if (hw->mac.ops.set_ethertype_anti_spoofing) {
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP),
+ (IXGBE_ETQF_FILTER_EN |
+ IXGBE_ETQF_TX_ANTISPOOF |
+ IXGBE_ETH_P_LLDP));
+
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC),
+ (IXGBE_ETQF_FILTER_EN |
+ IXGBE_ETQF_TX_ANTISPOOF |
+ ETH_P_PAUSE));
+
+ hw->mac.ops.set_ethertype_anti_spoofing(hw, setting, vf);
}
return 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index dad925706f4c..47e65e2f886a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -44,6 +44,7 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
u8 qos);
+int ixgbe_link_mbps(struct ixgbe_adapter *adapter);
int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
int max_tx_rate);
int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index bf7367a08716..1248a9936f7a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2015 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -59,8 +59,12 @@
#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72
#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
#define IXGBE_SUBDEV_ID_82599_SP_560FLR 0x211B
+#define IXGBE_SUBDEV_ID_82599_LOM_SNAP6 0x2159
+#define IXGBE_SUBDEV_ID_82599_SFP_1OCP 0x000D
+#define IXGBE_SUBDEV_ID_82599_SFP_2OCP 0x0008
+#define IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1 0x8976
+#define IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2 0x06EE
#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470
-#define IXGBE_SUBDEV_ID_82599_LOM_SFP 0x8976
#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D
#define IXGBE_DEV_ID_82599EN_SFP 0x1557
@@ -75,21 +79,25 @@
#define IXGBE_DEV_ID_X540T1 0x1560
#define IXGBE_DEV_ID_X550T 0x1563
+#define IXGBE_DEV_ID_X550T1 0x15D1
#define IXGBE_DEV_ID_X550EM_X_KX4 0x15AA
#define IXGBE_DEV_ID_X550EM_X_KR 0x15AB
#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC
#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD
#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE
-#define IXGBE_DEV_ID_X550_VF_HV 0x1564
-#define IXGBE_DEV_ID_X550_VF 0x1565
-#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
-#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9
+#define IXGBE_DEV_ID_X550EM_A_KR 0x15C2
+#define IXGBE_DEV_ID_X550EM_A_KR_L 0x15C3
+#define IXGBE_DEV_ID_X550EM_A_SFP_N 0x15C4
+#define IXGBE_DEV_ID_X550EM_A_SGMII 0x15C6
+#define IXGBE_DEV_ID_X550EM_A_SGMII_L 0x15C7
+#define IXGBE_DEV_ID_X550EM_A_SFP 0x15CE
/* VF Device IDs */
-#define IXGBE_DEV_ID_82599_VF 0x10ED
-#define IXGBE_DEV_ID_X540_VF 0x1515
+#define IXGBE_DEV_ID_82599_VF 0x10ED
+#define IXGBE_DEV_ID_X540_VF 0x1515
#define IXGBE_DEV_ID_X550_VF 0x1565
#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
+#define IXGBE_DEV_ID_X550EM_A_VF 0x15C5
#define IXGBE_CAT(r, m) IXGBE_##r##_##m
@@ -128,7 +136,7 @@
#define IXGBE_FLA_X540 IXGBE_FLA_8259X
#define IXGBE_FLA_X550 IXGBE_FLA_8259X
#define IXGBE_FLA_X550EM_x IXGBE_FLA_8259X
-#define IXGBE_FLA_X550EM_a 0x15F6C
+#define IXGBE_FLA_X550EM_a 0x15F68
#define IXGBE_FLA(_hw) IXGBE_BY_MAC((_hw), FLA)
#define IXGBE_EEMNGCTL 0x10110
#define IXGBE_EEMNGDATA 0x10114
@@ -143,13 +151,6 @@
#define IXGBE_GRC_X550EM_a 0x15F64
#define IXGBE_GRC(_hw) IXGBE_BY_MAC((_hw), GRC)
-#define IXGBE_SRAMREL_8259X 0x10210
-#define IXGBE_SRAMREL_X540 IXGBE_SRAMREL_8259X
-#define IXGBE_SRAMREL_X550 IXGBE_SRAMREL_8259X
-#define IXGBE_SRAMREL_X550EM_x IXGBE_SRAMREL_8259X
-#define IXGBE_SRAMREL_X550EM_a 0x15F6C
-#define IXGBE_SRAMREL(_hw) IXGBE_BY_MAC((_hw), SRAMREL)
-
/* General Receive Control */
#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */
#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */
@@ -375,6 +376,8 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_MRCTL(_i) (0x0F600 + ((_i) * 4))
#define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4))
#define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4))
+#define IXGBE_WQBR_RX(_i) (0x2FB0 + ((_i) * 4)) /* 4 total */
+#define IXGBE_WQBR_TX(_i) (0x8130 + ((_i) * 4)) /* 4 total */
#define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/
#define IXGBE_RXFECCERR0 0x051B8
#define IXGBE_LLITHRESH 0x0EC90
@@ -446,6 +449,8 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_DMATXCTL_TE 0x1 /* Transmit Enable */
#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */
#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */
+#define IXGBE_DMATXCTL_MDP_EN 0x20 /* Bit 5 */
+#define IXGBE_DMATXCTL_MBINTEN 0x40 /* Bit 6 */
#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */
#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */
@@ -543,6 +548,7 @@ struct ixgbe_thermal_sensor_data {
/* DCB registers */
#define MAX_TRAFFIC_CLASS 8
#define X540_TRAFFIC_CLASS 4
+#define DEF_TRAFFIC_CLASS 1
#define IXGBE_RMCS 0x03D00
#define IXGBE_DPMCS 0x07F40
#define IXGBE_PDPMCS 0x0CD00
@@ -554,7 +560,6 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */
#define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */
-
/* Security Control Registers */
#define IXGBE_SECTXCTRL 0x08800
#define IXGBE_SECTXSTAT 0x08804
@@ -693,16 +698,16 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_FCDMARW 0x02420 /* FC Receive DMA RW */
#define IXGBE_FCINVST0 0x03FC0 /* FC Invalid DMA Context Status Reg 0 */
#define IXGBE_FCINVST(_i) (IXGBE_FCINVST0 + ((_i) * 4))
-#define IXGBE_FCBUFF_VALID (1 << 0) /* DMA Context Valid */
-#define IXGBE_FCBUFF_BUFFSIZE (3 << 3) /* User Buffer Size */
-#define IXGBE_FCBUFF_WRCONTX (1 << 7) /* 0: Initiator, 1: Target */
+#define IXGBE_FCBUFF_VALID BIT(0) /* DMA Context Valid */
+#define IXGBE_FCBUFF_BUFFSIZE (3u << 3) /* User Buffer Size */
+#define IXGBE_FCBUFF_WRCONTX BIT(7) /* 0: Initiator, 1: Target */
#define IXGBE_FCBUFF_BUFFCNT 0x0000ff00 /* Number of User Buffers */
#define IXGBE_FCBUFF_OFFSET 0xffff0000 /* User Buffer Offset */
#define IXGBE_FCBUFF_BUFFSIZE_SHIFT 3
#define IXGBE_FCBUFF_BUFFCNT_SHIFT 8
#define IXGBE_FCBUFF_OFFSET_SHIFT 16
-#define IXGBE_FCDMARW_WE (1 << 14) /* Write enable */
-#define IXGBE_FCDMARW_RE (1 << 15) /* Read enable */
+#define IXGBE_FCDMARW_WE BIT(14) /* Write enable */
+#define IXGBE_FCDMARW_RE BIT(15) /* Read enable */
#define IXGBE_FCDMARW_FCOESEL 0x000001ff /* FC X_ID: 11 bits */
#define IXGBE_FCDMARW_LASTSIZE 0xffff0000 /* Last User Buffer Size */
#define IXGBE_FCDMARW_LASTSIZE_SHIFT 16
@@ -719,23 +724,23 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_FCFLT 0x05108 /* FC FLT Context */
#define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */
#define IXGBE_FCPARAM 0x051d8 /* FC Offset Parameter */
-#define IXGBE_FCFLT_VALID (1 << 0) /* Filter Context Valid */
-#define IXGBE_FCFLT_FIRST (1 << 1) /* Filter First */
+#define IXGBE_FCFLT_VALID BIT(0) /* Filter Context Valid */
+#define IXGBE_FCFLT_FIRST BIT(1) /* Filter First */
#define IXGBE_FCFLT_SEQID 0x00ff0000 /* Sequence ID */
#define IXGBE_FCFLT_SEQCNT 0xff000000 /* Sequence Count */
-#define IXGBE_FCFLTRW_RVALDT (1 << 13) /* Fast Re-Validation */
-#define IXGBE_FCFLTRW_WE (1 << 14) /* Write Enable */
-#define IXGBE_FCFLTRW_RE (1 << 15) /* Read Enable */
+#define IXGBE_FCFLTRW_RVALDT BIT(13) /* Fast Re-Validation */
+#define IXGBE_FCFLTRW_WE BIT(14) /* Write Enable */
+#define IXGBE_FCFLTRW_RE BIT(15) /* Read Enable */
/* FCoE Receive Control */
#define IXGBE_FCRXCTRL 0x05100 /* FC Receive Control */
-#define IXGBE_FCRXCTRL_FCOELLI (1 << 0) /* Low latency interrupt */
-#define IXGBE_FCRXCTRL_SAVBAD (1 << 1) /* Save Bad Frames */
-#define IXGBE_FCRXCTRL_FRSTRDH (1 << 2) /* EN 1st Read Header */
-#define IXGBE_FCRXCTRL_LASTSEQH (1 << 3) /* EN Last Header in Seq */
-#define IXGBE_FCRXCTRL_ALLH (1 << 4) /* EN All Headers */
-#define IXGBE_FCRXCTRL_FRSTSEQH (1 << 5) /* EN 1st Seq. Header */
-#define IXGBE_FCRXCTRL_ICRC (1 << 6) /* Ignore Bad FC CRC */
-#define IXGBE_FCRXCTRL_FCCRCBO (1 << 7) /* FC CRC Byte Ordering */
+#define IXGBE_FCRXCTRL_FCOELLI BIT(0) /* Low latency interrupt */
+#define IXGBE_FCRXCTRL_SAVBAD BIT(1) /* Save Bad Frames */
+#define IXGBE_FCRXCTRL_FRSTRDH BIT(2) /* EN 1st Read Header */
+#define IXGBE_FCRXCTRL_LASTSEQH BIT(3) /* EN Last Header in Seq */
+#define IXGBE_FCRXCTRL_ALLH BIT(4) /* EN All Headers */
+#define IXGBE_FCRXCTRL_FRSTSEQH BIT(5) /* EN 1st Seq. Header */
+#define IXGBE_FCRXCTRL_ICRC BIT(6) /* Ignore Bad FC CRC */
+#define IXGBE_FCRXCTRL_FCCRCBO BIT(7) /* FC CRC Byte Ordering */
#define IXGBE_FCRXCTRL_FCOEVER 0x00000f00 /* FCoE Version: 4 bits */
#define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8
/* FCoE Redirection */
@@ -1056,15 +1061,9 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_TIC_DW2(_i) (0x082B0 + ((_i) * 4))
#define IXGBE_TDPROBE 0x07F20
#define IXGBE_TXBUFCTRL 0x0C600
-#define IXGBE_TXBUFDATA0 0x0C610
-#define IXGBE_TXBUFDATA1 0x0C614
-#define IXGBE_TXBUFDATA2 0x0C618
-#define IXGBE_TXBUFDATA3 0x0C61C
+#define IXGBE_TXBUFDATA(_i) (0x0C610 + ((_i) * 4)) /* 4 of these (0-3) */
#define IXGBE_RXBUFCTRL 0x03600
-#define IXGBE_RXBUFDATA0 0x03610
-#define IXGBE_RXBUFDATA1 0x03614
-#define IXGBE_RXBUFDATA2 0x03618
-#define IXGBE_RXBUFDATA3 0x0361C
+#define IXGBE_RXBUFDATA(_i) (0x03610 + ((_i) * 4)) /* 4 of these (0-3) */
#define IXGBE_PCIE_DIAG(_i) (0x11090 + ((_i) * 4)) /* 8 of these */
#define IXGBE_RFVAL 0x050A4
#define IXGBE_MDFTC1 0x042B8
@@ -1127,6 +1126,7 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_XPCSS 0x04290
#define IXGBE_MFLCN 0x04294
#define IXGBE_SERDESC 0x04298
+#define IXGBE_MAC_SGMII_BUSY 0x04298
#define IXGBE_MACS 0x0429C
#define IXGBE_AUTOC 0x042A0
#define IXGBE_LINKS 0x042A4
@@ -1203,6 +1203,8 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI */
#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC enabled */
#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC enabled */
+#define IXGBE_RDRXCTL_MBINTEN 0x10000000
+#define IXGBE_RDRXCTL_MDP_EN 0x20000000
/* RQTC Bit Masks and Shifts */
#define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4)
@@ -1249,20 +1251,20 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
#define IXGBE_DCA_RXCTRL_CPUID_MASK_82599 0xFF000000 /* Rx CPUID Mask */
#define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599 24 /* Rx CPUID Shift */
-#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
-#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
-#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
-#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */
-#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */
-#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */
+#define IXGBE_DCA_RXCTRL_DESC_DCA_EN BIT(5) /* DCA Rx Desc enable */
+#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN BIT(6) /* DCA Rx Desc header enable */
+#define IXGBE_DCA_RXCTRL_DATA_DCA_EN BIT(7) /* DCA Rx Desc payload enable */
+#define IXGBE_DCA_RXCTRL_DESC_RRO_EN BIT(9) /* DCA Rx rd Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DATA_WRO_EN BIT(13) /* Rx wr data Relax Order */
+#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN BIT(15) /* Rx wr header RO */
#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
#define IXGBE_DCA_TXCTRL_CPUID_MASK_82599 0xFF000000 /* Tx CPUID Mask */
#define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */
-#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
-#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
-#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */
-#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_DCA_EN BIT(5) /* DCA Tx Desc enable */
+#define IXGBE_DCA_TXCTRL_DESC_RRO_EN BIT(9) /* Tx rd Desc Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_WRO_EN BIT(11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_DATA_RRO_EN BIT(13) /* Tx rd data Relax Order */
#define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */
/* MSCA Bit Masks */
@@ -1309,6 +1311,7 @@ struct ixgbe_thermal_sensor_data {
/* MDIO definitions */
+#define IXGBE_MDIO_ZERO_DEV_TYPE 0x0
#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1
#define IXGBE_MDIO_PCS_DEV_TYPE 0x3
#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4
@@ -1740,7 +1743,7 @@ enum {
#define IXGBE_ETQF_TX_ANTISPOOF 0x20000000 /* bit 29 */
#define IXGBE_ETQF_1588 0x40000000 /* bit 30 */
#define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */
-#define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */
+#define IXGBE_ETQF_POOL_ENABLE BIT(26) /* bit 26 */
#define IXGBE_ETQF_POOL_SHIFT 20
#define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */
@@ -1866,20 +1869,20 @@ enum {
#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9
#define IXGBE_AUTOC_10G_PMA_PMD_MASK 0x00000180
#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7
-#define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_10G_KX4 (0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_10G_CX4 (0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_BX (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_KX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_SFI (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC_1G_KX_BX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_XAUI (0u << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_KX4 (1u << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_10G_CX4 (2u << IXGBE_AUTOC_10G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_BX (0u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_KX (1u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_SFI (0u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC_1G_KX_BX (1u << IXGBE_AUTOC_1G_PMA_PMD_SHIFT)
#define IXGBE_AUTOC2_UPPER_MASK 0xFFFF0000
#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK 0x00030000
#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT 16
-#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
-#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_10G_KR (0u << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_10G_XFI (1u << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
+#define IXGBE_AUTOC2_10G_SFI (2u << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT)
#define IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK 0x50000000
#define IXGBE_AUTOC2_LINK_DISABLE_MASK 0x70000000
@@ -1957,7 +1960,9 @@ enum {
#define IXGBE_GSSR_PHY1_SM 0x0004
#define IXGBE_GSSR_MAC_CSR_SM 0x0008
#define IXGBE_GSSR_FLASH_SM 0x0010
+#define IXGBE_GSSR_NVM_UPDATE_SM 0x0200
#define IXGBE_GSSR_SW_MNG_SM 0x0400
+#define IXGBE_GSSR_TOKEN_SM 0x40000000 /* SW bit for shared access */
#define IXGBE_GSSR_SHARED_I2C_SM 0x1806 /* Wait for both phys & I2Cs */
#define IXGBE_GSSR_I2C_MASK 0x1800
#define IXGBE_GSSR_NVM_PHY_MASK 0xF
@@ -1997,6 +2002,9 @@ enum {
#define IXGBE_PBANUM_PTR_GUARD 0xFAFA
#define IXGBE_EEPROM_CHECKSUM 0x3F
#define IXGBE_EEPROM_SUM 0xBABA
+#define IXGBE_EEPROM_CTRL_4 0x45
+#define IXGBE_EE_CTRL_4_INST_ID 0x10
+#define IXGBE_EE_CTRL_4_INST_ID_SHIFT 4
#define IXGBE_PCIE_ANALOG_PTR 0x03
#define IXGBE_ATLAS0_CONFIG_PTR 0x04
#define IXGBE_PHY_PTR 0x04
@@ -2111,6 +2119,7 @@ enum {
#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3
#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1
#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2
+#define IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR BIT(7)
#define IXGBE_FW_LESM_PARAMETERS_PTR 0x2
#define IXGBE_FW_LESM_STATE_1 0x1
#define IXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */
@@ -2530,6 +2539,10 @@ enum ixgbe_fdir_pballoc_type {
#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS 0x00000080
#define IXGBE_FDIRCTRL_DROP_Q_SHIFT 8
#define IXGBE_FDIRCTRL_FLEX_SHIFT 16
+#define IXGBE_FDIRCTRL_DROP_NO_MATCH 0x00008000
+#define IXGBE_FDIRCTRL_FILTERMODE_SHIFT 21
+#define IXGBE_FDIRCTRL_FILTERMODE_MACVLAN 0x0001 /* bit 23:21, 001b */
+#define IXGBE_FDIRCTRL_FILTERMODE_CLOUD 0x0002 /* bit 23:21, 010b */
#define IXGBE_FDIRCTRL_SEARCHLIM 0x00800000
#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT 24
#define IXGBE_FDIRCTRL_FULL_THRESH_MASK 0xF0000000
@@ -2620,6 +2633,20 @@ enum ixgbe_fdir_pballoc_type {
#define FW_MAX_READ_BUFFER_SIZE 1024
#define FW_DISABLE_RXEN_CMD 0xDE
#define FW_DISABLE_RXEN_LEN 0x1
+#define FW_PHY_MGMT_REQ_CMD 0x20
+#define FW_PHY_TOKEN_REQ_CMD 0x0A
+#define FW_PHY_TOKEN_REQ_LEN 2
+#define FW_PHY_TOKEN_REQ 0
+#define FW_PHY_TOKEN_REL 1
+#define FW_PHY_TOKEN_OK 1
+#define FW_PHY_TOKEN_RETRY 0x80
+#define FW_PHY_TOKEN_DELAY 5 /* milliseconds */
+#define FW_PHY_TOKEN_WAIT 5 /* seconds */
+#define FW_PHY_TOKEN_RETRIES ((FW_PHY_TOKEN_WAIT * 1000) / FW_PHY_TOKEN_DELAY)
+#define FW_INT_PHY_REQ_CMD 0xB
+#define FW_INT_PHY_REQ_LEN 10
+#define FW_INT_PHY_REQ_READ 0
+#define FW_INT_PHY_REQ_WRITE 1
/* Host Interface Command Structures */
struct ixgbe_hic_hdr {
@@ -2688,6 +2715,28 @@ struct ixgbe_hic_disable_rxen {
u16 pad3;
};
+struct ixgbe_hic_phy_token_req {
+ struct ixgbe_hic_hdr hdr;
+ u8 port_number;
+ u8 command_type;
+ u16 pad;
+};
+
+struct ixgbe_hic_internal_phy_req {
+ struct ixgbe_hic_hdr hdr;
+ u8 port_number;
+ u8 command_type;
+ __be16 address;
+ u16 rsv1;
+ __be32 write_data;
+ u16 pad;
+} __packed;
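+/* __packed matters here: without it the compiler would pad before
+ * write_data (assuming the usual 4-byte ixgbe_hic_hdr), and the
+ * firmware expects the unpadded wire layout.
+ */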
+
+struct ixgbe_hic_internal_phy_resp {
+ struct ixgbe_hic_hdr hdr;
+ __be32 read_data;
+};
+
/* Transmit Descriptor - Advanced */
union ixgbe_adv_tx_desc {
struct {
@@ -2786,15 +2835,15 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */
#define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */
#define IXGBE_ADVTXT_TUCMD_FCOE 0x00008000 /* FCoE Frame Type */
-#define IXGBE_ADVTXD_FCOEF_EOF_MASK (0x3 << 10) /* FC EOF index */
-#define IXGBE_ADVTXD_FCOEF_SOF ((1 << 2) << 10) /* FC SOF index */
-#define IXGBE_ADVTXD_FCOEF_PARINC ((1 << 3) << 10) /* Rel_Off in F_CTL */
-#define IXGBE_ADVTXD_FCOEF_ORIE ((1 << 4) << 10) /* Orientation: End */
-#define IXGBE_ADVTXD_FCOEF_ORIS ((1 << 5) << 10) /* Orientation: Start */
-#define IXGBE_ADVTXD_FCOEF_EOF_N (0x0 << 10) /* 00: EOFn */
-#define IXGBE_ADVTXD_FCOEF_EOF_T (0x1 << 10) /* 01: EOFt */
-#define IXGBE_ADVTXD_FCOEF_EOF_NI (0x2 << 10) /* 10: EOFni */
-#define IXGBE_ADVTXD_FCOEF_EOF_A (0x3 << 10) /* 11: EOFa */
+#define IXGBE_ADVTXD_FCOEF_SOF (BIT(2) << 10) /* FC SOF index */
+#define IXGBE_ADVTXD_FCOEF_PARINC (BIT(3) << 10) /* Rel_Off in F_CTL */
+#define IXGBE_ADVTXD_FCOEF_ORIE (BIT(4) << 10) /* Orientation: End */
+#define IXGBE_ADVTXD_FCOEF_ORIS (BIT(5) << 10) /* Orientation: Start */
+#define IXGBE_ADVTXD_FCOEF_EOF_N (0u << 10) /* 00: EOFn */
+#define IXGBE_ADVTXD_FCOEF_EOF_T (1u << 10) /* 01: EOFt */
+#define IXGBE_ADVTXD_FCOEF_EOF_NI (2u << 10) /* 10: EOFni */
+#define IXGBE_ADVTXD_FCOEF_EOF_A (3u << 10) /* 11: EOFa */
+#define IXGBE_ADVTXD_FCOEF_EOF_MASK (3u << 10) /* FC EOF index */
#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
@@ -2948,7 +2997,6 @@ union ixgbe_atr_hash_dword {
IXGBE_CAT(EEC, m), \
IXGBE_CAT(FLA, m), \
IXGBE_CAT(GRC, m), \
- IXGBE_CAT(SRAMREL, m), \
IXGBE_CAT(FACTPS, m), \
IXGBE_CAT(SWSM, m), \
IXGBE_CAT(SWFW_SYNC, m), \
@@ -2989,6 +3037,7 @@ enum ixgbe_mac_type {
ixgbe_mac_X540,
ixgbe_mac_X550,
ixgbe_mac_X550EM_x,
+ ixgbe_mac_x550em_a,
ixgbe_num_macs
};
@@ -3017,6 +3066,7 @@ enum ixgbe_phy_type {
ixgbe_phy_qsfp_intel,
ixgbe_phy_qsfp_unknown,
ixgbe_phy_sfp_unsupported,
+ ixgbe_phy_sgmii,
ixgbe_phy_generic
};
@@ -3130,8 +3180,9 @@ struct ixgbe_bus_info {
enum ixgbe_bus_width width;
enum ixgbe_bus_type type;
- u16 func;
- u16 lan_id;
+ u8 func;
+ u8 lan_id;
+ u8 instance_id;
};
/* Flow control parameters */
@@ -3266,6 +3317,7 @@ struct ixgbe_mac_operations {
s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32);
void (*release_swfw_sync)(struct ixgbe_hw *, u32);
+ void (*init_swfw_sync)(struct ixgbe_hw *);
s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *);
s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool);
@@ -3308,6 +3360,7 @@ struct ixgbe_mac_operations {
/* Flow Control */
s32 (*fc_enable)(struct ixgbe_hw *);
+ s32 (*setup_fc)(struct ixgbe_hw *);
/* Manageability interface */
s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
@@ -3323,6 +3376,8 @@ struct ixgbe_mac_operations {
s32 (*dmac_config)(struct ixgbe_hw *hw);
s32 (*dmac_update_tcs)(struct ixgbe_hw *hw);
s32 (*dmac_config_tcs)(struct ixgbe_hw *hw);
+ s32 (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *);
+ s32 (*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32);
};
struct ixgbe_phy_operations {
@@ -3442,7 +3497,7 @@ struct ixgbe_mbx_stats {
};
struct ixgbe_mbx_info {
- struct ixgbe_mbx_operations ops;
+ const struct ixgbe_mbx_operations *ops;
struct ixgbe_mbx_stats stats;
u32 timeout;
u32 usec_delay;
@@ -3470,15 +3525,16 @@ struct ixgbe_hw {
bool force_full_reset;
bool allow_unsupported_sfp;
bool wol_enabled;
+ bool need_crosstalk_fix;
};
struct ixgbe_info {
enum ixgbe_mac_type mac;
s32 (*get_invariants)(struct ixgbe_hw *);
- struct ixgbe_mac_operations *mac_ops;
- struct ixgbe_eeprom_operations *eeprom_ops;
- struct ixgbe_phy_operations *phy_ops;
- struct ixgbe_mbx_operations *mbx_ops;
+ const struct ixgbe_mac_operations *mac_ops;
+ const struct ixgbe_eeprom_operations *eeprom_ops;
+ const struct ixgbe_phy_operations *phy_ops;
+ const struct ixgbe_mbx_operations *mbx_ops;
const u32 *mvals;
};
@@ -3517,14 +3573,19 @@ struct ixgbe_info {
#define IXGBE_ERR_INVALID_ARGUMENT -32
#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33
#define IXGBE_ERR_FDIR_CMD_INCOMPLETE -38
+#define IXGBE_ERR_FW_RESP_INVALID -39
+#define IXGBE_ERR_TOKEN_RETRY -40
#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
#define IXGBE_FUSES0_GROUP(_i) (0x11158 + ((_i) * 4))
#define IXGBE_FUSES0_300MHZ BIT(5)
-#define IXGBE_FUSES0_REV_MASK (3 << 6)
+#define IXGBE_FUSES0_REV_MASK (3u << 6)
#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010)
#define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C)
+#define IXGBE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C)
+#define IXGBE_KRM_AN_CNTL_8(P) ((P) ? 0x8248 : 0x4248)
+#define IXGBE_KRM_SGMII_CTRL(P) ((P) ? 0x82A0 : 0x42A0)
#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634)
#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P) ? 0x8638 : 0x4638)
#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P) ? 0x8B00 : 0x4B00)
@@ -3532,43 +3593,54 @@ struct ixgbe_info {
#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520)
#define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 0x9A00 : 0x5A00)
-#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B (1 << 9)
-#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS (1 << 11)
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B BIT(9)
+#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS BIT(11)
+
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (7u << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2u << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4u << 8)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN BIT(12)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN BIT(13)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ BIT(14)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC BIT(15)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX BIT(16)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR BIT(18)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX BIT(24)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR BIT(26)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE BIT(29)
+#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART BIT(31)
+
+#define IXGBE_KRM_AN_CNTL_1_SYM_PAUSE BIT(28)
+#define IXGBE_KRM_AN_CNTL_1_ASM_PAUSE BIT(29)
+
+#define IXGBE_KRM_AN_CNTL_8_LINEAR BIT(0)
+#define IXGBE_KRM_AN_CNTL_8_LIMITING BIT(1)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (0x7 << 8)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2 << 8)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4 << 8)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ (1 << 14)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC (1 << 15)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX (1 << 16)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR (1 << 18)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX (1 << 24)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR (1 << 26)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE (1 << 29)
-#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART (1 << 31)
+#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D BIT(12)
+#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D BIT(19)
-#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN (1 << 6)
-#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN (1 << 15)
-#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN (1 << 16)
+#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN BIT(6)
+#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN BIT(15)
+#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN BIT(16)
-#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL (1 << 4)
-#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS (1 << 2)
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL BIT(4)
+#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS BIT(2)
-#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK (0x3 << 16)
+#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK (3u << 16)
-#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN (1 << 1)
-#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN (1 << 2)
-#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN (1 << 3)
-#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN (1 << 31)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN BIT(1)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN BIT(2)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN BIT(3)
+#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN BIT(31)
#define IXGBE_KX4_LINK_CNTL_1 0x4C
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX (1 << 16)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 (1 << 17)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX (1 << 24)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4 (1 << 25)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE (1 << 29)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP (1 << 30)
-#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART (1 << 31)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX BIT(16)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_CAP_KX4 BIT(17)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX BIT(24)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_EEE_CAP_KX4 BIT(25)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_ENABLE BIT(29)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_FORCE_LINK_UP BIT(30)
+#define IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART BIT(31)
#define IXGBE_SB_IOSF_INDIRECT_CTRL 0x00011144
#define IXGBE_SB_IOSF_INDIRECT_DATA 0x00011148
@@ -3584,12 +3656,17 @@ struct ixgbe_info {
#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT 28
#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_MASK 0x7
#define IXGBE_SB_IOSF_CTRL_BUSY_SHIFT 31
-#define IXGBE_SB_IOSF_CTRL_BUSY (1 << IXGBE_SB_IOSF_CTRL_BUSY_SHIFT)
+#define IXGBE_SB_IOSF_CTRL_BUSY BIT(IXGBE_SB_IOSF_CTRL_BUSY_SHIFT)
#define IXGBE_SB_IOSF_TARGET_KR_PHY 0
#define IXGBE_SB_IOSF_TARGET_KX4_UNIPHY 1
#define IXGBE_SB_IOSF_TARGET_KX4_PCS0 2
#define IXGBE_SB_IOSF_TARGET_KX4_PCS1 3
#define IXGBE_NW_MNG_IF_SEL 0x00011178
+#define IXGBE_NW_MNG_IF_SEL_MDIO_ACT BIT(1)
+#define IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M BIT(23)
#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE BIT(24)
+#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3
+#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD \
+ (0x1F << IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT)
#endif /* _IXGBE_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 2358c1b7d586..f2b1d48a16c3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2014 Intel Corporation.
+ Copyright(c) 1999 - 2016 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -214,8 +214,8 @@ s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
IXGBE_EEC_SIZE_SHIFT);
- eeprom->word_size = 1 << (eeprom_size +
- IXGBE_EEPROM_WORD_SIZE_SHIFT);
+ eeprom->word_size = BIT(eeprom_size +
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
eeprom->type, eeprom->word_size);
@@ -747,6 +747,25 @@ static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_init_swfw_sync_X540 - Reset SW/FW semaphore
+ * @hw: pointer to hardware structure
+ *
+ * This function resets hardware semaphore bits for a semaphore that may
+ * have been left locked due to a catastrophic failure.
+ **/
+void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw)
+{
+ /* First try to grab the semaphore; there is no need to check whether
+ * we actually got the lock, since we do the same thing either way:
+ * if we got the lock, we release it;
+ * if we timed out trying to get it, we force its release.
+ */
+ ixgbe_get_swfw_sync_semaphore(hw);
+ ixgbe_release_swfw_sync_semaphore(hw);
+}
+
+/**
* ixgbe_blink_led_start_X540 - Blink LED based on index.
* @hw: pointer to hardware structure
* @index: led number to blink
@@ -810,7 +829,7 @@ s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index)
return 0;
}
-static struct ixgbe_mac_operations mac_ops_X540 = {
+static const struct ixgbe_mac_operations mac_ops_X540 = {
.init_hw = &ixgbe_init_hw_generic,
.reset_hw = &ixgbe_reset_hw_X540,
.start_hw = &ixgbe_start_hw_X540,
@@ -846,6 +865,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
.clear_vfta = &ixgbe_clear_vfta_generic,
.set_vfta = &ixgbe_set_vfta_generic,
.fc_enable = &ixgbe_fc_enable_generic,
+ .setup_fc = ixgbe_setup_fc_generic,
.set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic,
.init_uta_tables = &ixgbe_init_uta_tables_generic,
.setup_sfp = NULL,
@@ -853,6 +873,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
.set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
.acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540,
.release_swfw_sync = &ixgbe_release_swfw_sync_X540,
+ .init_swfw_sync = &ixgbe_init_swfw_sync_X540,
.disable_rx_buff = &ixgbe_disable_rx_buff_generic,
.enable_rx_buff = &ixgbe_enable_rx_buff_generic,
.get_thermal_sensor_data = NULL,
@@ -863,7 +884,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
.disable_rx = &ixgbe_disable_rx_generic,
};
-static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
+static const struct ixgbe_eeprom_operations eeprom_ops_X540 = {
.init_params = &ixgbe_init_eeprom_params_X540,
.read = &ixgbe_read_eerd_X540,
.read_buffer = &ixgbe_read_eerd_buffer_X540,
@@ -874,7 +895,7 @@ static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
.update_checksum = &ixgbe_update_eeprom_checksum_X540,
};
-static struct ixgbe_phy_operations phy_ops_X540 = {
+static const struct ixgbe_phy_operations phy_ops_X540 = {
.identify = &ixgbe_identify_phy_generic,
.identify_sfp = &ixgbe_identify_sfp_module_generic,
.init = NULL,
@@ -897,7 +918,7 @@ static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = {
IXGBE_MVALS_INIT(X540)
};
-struct ixgbe_info ixgbe_X540_info = {
+const struct ixgbe_info ixgbe_X540_info = {
.mac = ixgbe_mac_X540,
.get_invariants = &ixgbe_get_invariants_X540,
.mac_ops = &mac_ops_X540,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
index a1468b1f4d8a..e21cd48491d3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h
@@ -36,4 +36,5 @@ s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask);
+void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw);
s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 68a9c646498e..4716ca499e67 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel 10 Gigabit PCI Express Linux driver
- * Copyright(c) 1999 - 2015 Intel Corporation.
+ * Copyright(c) 1999 - 2016 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -27,6 +27,7 @@
#include "ixgbe_phy.h"
static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *, ixgbe_link_speed);
+static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *);
static s32 ixgbe_get_invariants_X550_x(struct ixgbe_hw *hw)
{
@@ -272,16 +273,26 @@ out:
static s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw)
{
switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_A_SFP:
+ if (hw->bus.lan_id)
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM;
+ else
+ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM;
+ return ixgbe_identify_module_generic(hw);
case IXGBE_DEV_ID_X550EM_X_SFP:
/* set up for CS4227 usage */
hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
ixgbe_setup_mux_ctl(hw);
ixgbe_check_cs4227(hw);
+ /* Fallthrough */
+ case IXGBE_DEV_ID_X550EM_A_SFP_N:
return ixgbe_identify_module_generic(hw);
case IXGBE_DEV_ID_X550EM_X_KX4:
hw->phy.type = ixgbe_phy_x550em_kx4;
break;
case IXGBE_DEV_ID_X550EM_X_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR_L:
hw->phy.type = ixgbe_phy_x550em_kr;
break;
case IXGBE_DEV_ID_X550EM_X_1G_T:
@@ -324,8 +335,8 @@ static s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw)
eec = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
IXGBE_EEC_SIZE_SHIFT);
- eeprom->word_size = 1 << (eeprom_size +
- IXGBE_EEPROM_WORD_SIZE_SHIFT);
+ eeprom->word_size = BIT(eeprom_size +
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
eeprom->type, eeprom->word_size);
@@ -412,6 +423,121 @@ out:
return ret;
}
+/**
+ * ixgbe_get_phy_token - Get the token for shared PHY access
+ * @hw: Pointer to hardware structure
+ */
+static s32 ixgbe_get_phy_token(struct ixgbe_hw *hw)
+{
+ struct ixgbe_hic_phy_token_req token_cmd;
+ s32 status;
+
+ token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
+ token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
+ token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
+ token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+ token_cmd.port_number = hw->bus.lan_id;
+ token_cmd.command_type = FW_PHY_TOKEN_REQ;
+ token_cmd.pad = 0;
+ status = ixgbe_host_interface_command(hw, &token_cmd, sizeof(token_cmd),
+ IXGBE_HI_COMMAND_TIMEOUT,
+ true);
+ if (status)
+ return status;
+ if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
+ return 0;
+ if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY)
+ return IXGBE_ERR_FW_RESP_INVALID;
+
+ return IXGBE_ERR_TOKEN_RETRY;
+}
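+/* Callers should retry while this returns IXGBE_ERR_TOKEN_RETRY. A
+ * minimal sketch (illustrative only, not part of this patch) using the
+ * FW_PHY_TOKEN_* constants from ixgbe_type.h, i.e. polling every 5 ms
+ * for up to 5 seconds:
+ *
+ *	for (i = 0; i < FW_PHY_TOKEN_RETRIES; i++) {
+ *		status = ixgbe_get_phy_token(hw);
+ *		if (status != IXGBE_ERR_TOKEN_RETRY)
+ *			break;
+ *		msleep(FW_PHY_TOKEN_DELAY);
+ *	}
+ */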
+
+/**
+ * ixgbe_put_phy_token - Put the token for shared PHY access
+ * @hw: Pointer to hardware structure
+ */
+static s32 ixgbe_put_phy_token(struct ixgbe_hw *hw)
+{
+ struct ixgbe_hic_phy_token_req token_cmd;
+ s32 status;
+
+ token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD;
+ token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN;
+ token_cmd.hdr.cmd_or_resp.cmd_resv = 0;
+ token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+ token_cmd.port_number = hw->bus.lan_id;
+ token_cmd.command_type = FW_PHY_TOKEN_REL;
+ token_cmd.pad = 0;
+ status = ixgbe_host_interface_command(hw, &token_cmd, sizeof(token_cmd),
+ IXGBE_HI_COMMAND_TIMEOUT,
+ true);
+ if (status)
+ return status;
+ if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK)
+ return 0;
+ return IXGBE_ERR_FW_RESP_INVALID;
+}
+
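The two helpers above implement a firmware-arbitrated lock over the MDIO bus that both MAC instances share: request the token, perform the shared PHY access, release the token. A minimal caller sketch (kernel context assumed; the function name is illustrative, and the retry policy mirrors ixgbe_acquire_swfw_sync_x550em_a later in this patch):

static s32 example_phy_access_with_token(struct ixgbe_hw *hw)
{
	int retries = FW_PHY_TOKEN_RETRIES;
	s32 status;

	do {
		status = ixgbe_get_phy_token(hw);
		if (status != IXGBE_ERR_TOKEN_RETRY)
			break;			/* got it, or hard error */
		msleep(FW_PHY_TOKEN_DELAY);	/* firmware asked us to retry */
	} while (--retries);
	if (status)
		return status;

	/* ... MDIO accesses to the shared PHY go here ... */

	return ixgbe_put_phy_token(hw);
}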
+/**
+ * ixgbe_write_iosf_sb_reg_x550a - Write to IOSF PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: 3 bit device type
+ * @data: Data to write to the register
+ **/
+static s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+ __always_unused u32 device_type,
+ u32 data)
+{
+ struct ixgbe_hic_internal_phy_req write_cmd;
+
+ memset(&write_cmd, 0, sizeof(write_cmd));
+ write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
+ write_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
+ write_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+ write_cmd.port_number = hw->bus.lan_id;
+ write_cmd.command_type = FW_INT_PHY_REQ_WRITE;
+ write_cmd.address = cpu_to_be16(reg_addr);
+ write_cmd.write_data = cpu_to_be32(data);
+
+ return ixgbe_host_interface_command(hw, &write_cmd, sizeof(write_cmd),
+ IXGBE_HI_COMMAND_TIMEOUT, false);
+}
+
+/**
+ * ixgbe_read_iosf_sb_reg_x550a - Read from IOSF PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to read
+ * @device_type: 3 bit device type
+ * @data: Pointer to read data from the register
+ **/
+static s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+ __always_unused u32 device_type,
+ u32 *data)
+{
+ union {
+ struct ixgbe_hic_internal_phy_req cmd;
+ struct ixgbe_hic_internal_phy_resp rsp;
+ } hic;
+ s32 status;
+
+ memset(&hic, 0, sizeof(hic));
+ hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD;
+ hic.cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN;
+ hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
+ hic.cmd.port_number = hw->bus.lan_id;
+ hic.cmd.command_type = FW_INT_PHY_REQ_READ;
+ hic.cmd.address = cpu_to_be16(reg_addr);
+
+ status = ixgbe_host_interface_command(hw, &hic.cmd, sizeof(hic.cmd),
+ IXGBE_HI_COMMAND_TIMEOUT, true);
+
+ /* Extract the register value from the response. */
+ *data = be32_to_cpu(hic.rsp.read_data);
+
+ return status;
+}
+
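Note the asymmetry between the write and read paths above: the read path wraps the request and response structures in a union because ixgbe_host_interface_command() copies the firmware's reply back into the caller's buffer when its final return_data argument is true, so the same storage is sent as struct ixgbe_hic_internal_phy_req and then reinterpreted as struct ixgbe_hic_internal_phy_resp. The write path needs no reply payload and passes false.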
/** ixgbe_read_ee_hostif_data_X550 - Read EEPROM word using a host interface
* command assuming that the semaphore is already obtained.
* @hw: pointer to hardware structure
@@ -436,8 +562,7 @@ static s32 ixgbe_read_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
/* one word */
buffer.length = cpu_to_be16(sizeof(u16));
- status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
- sizeof(buffer),
+ status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer),
IXGBE_HI_COMMAND_TIMEOUT, false);
if (status)
return status;
@@ -487,7 +612,7 @@ static s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw,
buffer.address = cpu_to_be32((offset + current_word) * 2);
buffer.length = cpu_to_be16(words_to_read * 2);
- status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
+ status = ixgbe_host_interface_command(hw, &buffer,
sizeof(buffer),
IXGBE_HI_COMMAND_TIMEOUT,
false);
@@ -770,8 +895,7 @@ static s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset,
buffer.data = data;
buffer.address = cpu_to_be32(offset * 2);
- status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
- sizeof(buffer),
+ status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer),
IXGBE_HI_COMMAND_TIMEOUT, false);
return status;
}
@@ -813,8 +937,7 @@ static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN;
buffer.req.checksum = FW_DEFAULT_CHECKSUM;
- status = ixgbe_host_interface_command(hw, (u32 *)&buffer,
- sizeof(buffer),
+ status = ixgbe_host_interface_command(hw, &buffer, sizeof(buffer),
IXGBE_HI_COMMAND_TIMEOUT, false);
return status;
}
@@ -861,9 +984,9 @@ static void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD;
fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN;
fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
- fw_cmd.port_number = (u8)hw->bus.lan_id;
+ fw_cmd.port_number = hw->bus.lan_id;
- status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
+ status = ixgbe_host_interface_command(hw, &fw_cmd,
sizeof(struct ixgbe_hic_disable_rxen),
IXGBE_HI_COMMAND_TIMEOUT, true);
@@ -1248,6 +1371,117 @@ i2c_err:
}
/**
+ * ixgbe_setup_mac_link_sfp_n - Setup internal PHY for native SFP
+ * @hw: pointer to hardware structure
+ *
+ * Configure the integrated PHY for native SFP support.
+ */
+static s32
+ixgbe_setup_mac_link_sfp_n(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ __always_unused bool autoneg_wait_to_complete)
+{
+ bool setup_linear = false;
+ u32 reg_phy_int;
+ s32 rc;
+
+ /* Check if SFP module is supported and linear */
+ rc = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
+
+ /* If no SFP module is present, return success: an SFP-not-present
+ * error is not treated as fatal in the setup MAC link flow.
+ */
+ if (rc == IXGBE_ERR_SFP_NOT_PRESENT)
+ return 0;
+
+ if (rc)
+ return rc;
+
+ /* Configure internal PHY for native SFI */
+ rc = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_8(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY,
+ &reg_phy_int);
+ if (rc)
+ return rc;
+
+ if (setup_linear) {
+ reg_phy_int &= ~IXGBE_KRM_AN_CNTL_8_LIMITING;
+ reg_phy_int |= IXGBE_KRM_AN_CNTL_8_LINEAR;
+ } else {
+ reg_phy_int |= IXGBE_KRM_AN_CNTL_8_LIMITING;
+ reg_phy_int &= ~IXGBE_KRM_AN_CNTL_8_LINEAR;
+ }
+
+ rc = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_8(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY,
+ reg_phy_int);
+ if (rc)
+ return rc;
+
+ /* Setup XFI/SFI internal link */
+ return ixgbe_setup_ixfi_x550em(hw, &speed);
+}
+
+/**
+ * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP
+ * @hw: pointer to hardware structure
+ *
+ * Configure the integrated PHY for SFP support.
+ */
+static s32
+ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ __always_unused bool autoneg_wait_to_complete)
+{
+ u32 reg_slice, slice_offset;
+ bool setup_linear = false;
+ u16 reg_phy_ext;
+ s32 rc;
+
+ /* Check if SFP module is supported and linear */
+ rc = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear);
+
+ /* If no SFP module is present, return success: an SFP-not-present
+ * error is not treated as fatal in the setup MAC link flow.
+ */
+ if (rc == IXGBE_ERR_SFP_NOT_PRESENT)
+ return 0;
+
+ if (rc)
+ return rc;
+
+ /* Configure internal PHY for KR/KX. */
+ ixgbe_setup_kr_speed_x550em(hw, speed);
+
+ if (!hw->phy.mdio.prtad || hw->phy.mdio.prtad == 0xFFFF)
+ return IXGBE_ERR_PHY_ADDR_INVALID;
+
+ /* Get external PHY device id */
+ rc = hw->phy.ops.read_reg(hw, IXGBE_CS4227_GLOBAL_ID_MSB,
+ IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext);
+ if (rc)
+ return rc;
+
+ /* When configuring quad port CS4223, the MAC instance is part
+ * of the slice offset.
+ */
+ if (reg_phy_ext == IXGBE_CS4223_PHY_ID)
+ slice_offset = (hw->bus.lan_id +
+ (hw->bus.instance_id << 1)) << 12;
+ else
+ slice_offset = hw->bus.lan_id << 12;
+
+ /* Configure CS4227/CS4223 LINE side to proper mode. */
+ reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset;
+ if (setup_linear)
+ reg_phy_ext = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 1;
+ else
+ reg_phy_ext = (IXGBE_CS4227_EDC_MODE_SR << 1) | 1;
+ return hw->phy.ops.write_reg(hw, reg_slice, IXGBE_MDIO_ZERO_DEV_TYPE,
+ reg_phy_ext);
+}
+
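To make the slice arithmetic concrete: on a quad-port CS4223 with bus.instance_id = 1 and bus.lan_id = 0, slice_offset = (0 + (1 << 1)) << 12 = 0x2000, so the LINE-side write lands at IXGBE_CS4227_LINE_SPARE24_LSB + 0x2000; a CS4227 port with lan_id = 1 instead uses 1 << 12 = 0x1000.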
+/**
* ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed
* @hw: pointer to hardware structure
* @speed: new link speed
@@ -1326,6 +1560,57 @@ static s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw,
return 0;
}
+/**
+ * ixgbe_setup_sgmii - Set up link for sgmii
+ * @hw: pointer to hardware structure
+ */
+static s32
+ixgbe_setup_sgmii(struct ixgbe_hw *hw, __always_unused ixgbe_link_speed speed,
+ __always_unused bool autoneg_wait_to_complete)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ u32 lval, sval;
+ s32 rc;
+
+ rc = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &lval);
+ if (rc)
+ return rc;
+
+ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE;
+ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK;
+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN;
+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN;
+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G;
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
+ if (rc)
+ return rc;
+
+ rc = mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &sval);
+ if (rc)
+ return rc;
+
+ sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D;
+ sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D;
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, sval);
+ if (rc)
+ return rc;
+
+ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+ rc = mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, lval);
+
+ return rc;
+}
+
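The SGMII bring-up above is a three-stage read-modify-write sequence: the first write clears the existing AN-enable and forced-speed bits in IXGBE_KRM_LINK_CTRL_1 and enables SGMII/Clause 37 auto-negotiation with the speed forced to 1G, the second sets the forced 10/100 bits in IXGBE_KRM_SGMII_CTRL, and only the last write sets IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART, so the link retrains once with the complete new configuration rather than after each partial write.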
/** ixgbe_init_mac_link_ops_X550em - init mac link function pointers
* @hw: pointer to hardware structure
**/
@@ -1333,6 +1618,8 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
+ mac->ops.setup_fc = ixgbe_setup_fc_x550em;
+
switch (mac->ops.get_media_type(hw)) {
case ixgbe_media_type_fiber:
/* CS4227 does not support autoneg, so disable the laser control
@@ -1342,13 +1629,31 @@ static void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw)
mac->ops.enable_tx_laser = NULL;
mac->ops.flap_tx_laser = NULL;
mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber;
- mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_x550em;
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_A_SFP_N:
+ mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_n;
+ break;
+ case IXGBE_DEV_ID_X550EM_A_SFP:
+ mac->ops.setup_mac_link =
+ ixgbe_setup_mac_link_sfp_x550a;
+ break;
+ default:
+ mac->ops.setup_mac_link =
+ ixgbe_setup_mac_link_sfp_x550em;
+ break;
+ }
mac->ops.set_rate_select_speed =
ixgbe_set_soft_rate_select_speed;
break;
case ixgbe_media_type_copper:
mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em;
+ mac->ops.setup_fc = ixgbe_setup_fc_generic;
mac->ops.check_link = ixgbe_check_link_t_X550em;
+ return;
+ case ixgbe_media_type_backplane:
+ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII ||
+ hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L)
+ mac->ops.setup_link = ixgbe_setup_sgmii;
break;
default:
break;
@@ -1614,7 +1919,7 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
s32 status;
u32 reg_val;
- status = ixgbe_read_iosf_sb_reg_x550(hw,
+ status = hw->mac.ops.read_iosf_sb_reg(hw,
IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
if (status)
@@ -1636,7 +1941,7 @@ static s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw,
/* Restart auto-negotiation. */
reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
- status = ixgbe_write_iosf_sb_reg_x550(hw,
+ status = hw->mac.ops.write_iosf_sb_reg(hw,
IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
@@ -1653,9 +1958,9 @@ static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
s32 status;
u32 reg_val;
- status = ixgbe_read_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1,
- IXGBE_SB_IOSF_TARGET_KX4_PCS0 +
- hw->bus.lan_id, &reg_val);
+ status = hw->mac.ops.read_iosf_sb_reg(hw, IXGBE_KX4_LINK_CNTL_1,
+ IXGBE_SB_IOSF_TARGET_KX4_PCS0 +
+ hw->bus.lan_id, &reg_val);
if (status)
return status;
@@ -1674,20 +1979,24 @@ static s32 ixgbe_setup_kx4_x550em(struct ixgbe_hw *hw)
/* Restart auto-negotiation. */
reg_val |= IXGBE_KX4_LINK_CNTL_1_TETH_AN_RESTART;
- status = ixgbe_write_iosf_sb_reg_x550(hw, IXGBE_KX4_LINK_CNTL_1,
- IXGBE_SB_IOSF_TARGET_KX4_PCS0 +
- hw->bus.lan_id, reg_val);
+ status = hw->mac.ops.write_iosf_sb_reg(hw, IXGBE_KX4_LINK_CNTL_1,
+ IXGBE_SB_IOSF_TARGET_KX4_PCS0 +
+ hw->bus.lan_id, reg_val);
return status;
}
-/** ixgbe_setup_kr_x550em - Configure the KR PHY.
- * @hw: pointer to hardware structure
+/**
+ * ixgbe_setup_kr_x550em - Configure the KR PHY
+ * @hw: pointer to hardware structure
*
- * Configures the integrated KR PHY.
+ * Configures the integrated KR PHY for X550EM_x.
**/
static s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw)
{
+ if (hw->mac.type != ixgbe_mac_X550EM_x)
+ return 0;
+
return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised);
}
@@ -1842,6 +2151,86 @@ static s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw,
return status;
}
+/**
+ * ixgbe_setup_fc_x550em - Set up flow control
+ * @hw: pointer to hardware structure
+ */
+static s32 ixgbe_setup_fc_x550em(struct ixgbe_hw *hw)
+{
+ bool pause, asm_dir;
+ u32 reg_val;
+ s32 rc;
+
+ /* Validate the requested mode */
+ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
+ hw_err(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
+ return IXGBE_ERR_INVALID_LINK_SETTINGS;
+ }
+
+ /* 10gig parts do not have a word in the EEPROM to determine the
+ * default flow control setting, so we explicitly set it to full.
+ */
+ if (hw->fc.requested_mode == ixgbe_fc_default)
+ hw->fc.requested_mode = ixgbe_fc_full;
+
+ /* Determine PAUSE and ASM_DIR bits. */
+ switch (hw->fc.requested_mode) {
+ case ixgbe_fc_none:
+ pause = false;
+ asm_dir = false;
+ break;
+ case ixgbe_fc_tx_pause:
+ pause = false;
+ asm_dir = true;
+ break;
+ case ixgbe_fc_rx_pause:
+ /* Rx Flow control is enabled and Tx Flow control is
+ * disabled by software override. Since there really
+ * isn't a way to advertise that we are capable of Rx
+ * Pause ONLY, we advertise support for both symmetric
+ * and asymmetric Rx PAUSE and fall through to the
+ * fc_full case. Later, we disable the adapter's
+ * ability to send PAUSE frames.
+ */
+ /* Fallthrough */
+ case ixgbe_fc_full:
+ pause = true;
+ asm_dir = true;
+ break;
+ default:
+ hw_err(hw, "Flow control param set incorrectly\n");
+ return IXGBE_ERR_CONFIG;
+ }
+
+ if (hw->device_id != IXGBE_DEV_ID_X550EM_X_KR &&
+ hw->device_id != IXGBE_DEV_ID_X550EM_A_KR &&
+ hw->device_id != IXGBE_DEV_ID_X550EM_A_KR_L)
+ return 0;
+
+ rc = hw->mac.ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY,
+ &reg_val);
+ if (rc)
+ return rc;
+
+ reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE |
+ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE);
+ if (pause)
+ reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE;
+ if (asm_dir)
+ reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE;
+ rc = hw->mac.ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY,
+ reg_val);
+
+ /* This device does not fully support AN. */
+ hw->fc.disable_fc_autoneg = true;
+
+ return rc;
+}
+
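The switch above reduces to the following advertisement mapping, where PAUSE drives IXGBE_KRM_AN_CNTL_1_SYM_PAUSE and ASM_DIR drives IXGBE_KRM_AN_CNTL_1_ASM_PAUSE (and the register write only happens for the KR device IDs listed in the guard):

	requested_mode		PAUSE	ASM_DIR
	ixgbe_fc_none		0	0
	ixgbe_fc_tx_pause	0	1
	ixgbe_fc_rx_pause	1	1	(falls through to fc_full)
	ixgbe_fc_full		1	1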
/** ixgbe_enter_lplu_x550em - Transition to low power states
* @hw: pointer to hardware structure
*
@@ -1939,6 +2328,36 @@ static s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw)
return status;
}
+/**
+ * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register
+ * @hw: pointer to hardware structure
+ *
+ * Read NW_MNG_IF_SEL register and save field values.
+ */
+static void ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw)
+{
+ /* Save the NW management interface connected on the board. This is
+ * used to determine the internal PHY mode.
+ */
+ hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
+
+ /* If X552 (X550EM_a) and the MDIO is connected to an external PHY,
+ * then set the PHY address. This register field has only been used
+ * for X552.
+ */
+ if (!hw->phy.nw_mng_if_sel) {
+ if (hw->mac.type == ixgbe_mac_x550em_a) {
+ struct ixgbe_adapter *adapter = hw->back;
+
+ e_warn(drv, "nw_mng_if_sel not set\n");
+ }
+ return;
+ }
+
+ hw->phy.mdio.prtad = (hw->phy.nw_mng_if_sel &
+ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >>
+ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT;
+}
+
/** ixgbe_init_phy_ops_X550em - PHY/SFP specific init
* @hw: pointer to hardware structure
*
@@ -1953,14 +2372,11 @@ static s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw)
hw->mac.ops.set_lan_id(hw);
+ ixgbe_read_mng_if_sel_x550em(hw);
+
if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) {
phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM;
ixgbe_setup_mux_ctl(hw);
-
- /* Save NW management interface connected on board. This is used
- * to determine internal PHY mode.
- */
- phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL);
}
/* Identify the PHY or SFP module */
@@ -2023,16 +2439,24 @@ static enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw)
/* Detect if there is a copper PHY attached. */
switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_A_SGMII:
+ case IXGBE_DEV_ID_X550EM_A_SGMII_L:
+ hw->phy.type = ixgbe_phy_sgmii;
+ /* Fallthrough */
case IXGBE_DEV_ID_X550EM_X_KR:
case IXGBE_DEV_ID_X550EM_X_KX4:
+ case IXGBE_DEV_ID_X550EM_A_KR:
+ case IXGBE_DEV_ID_X550EM_A_KR_L:
media_type = ixgbe_media_type_backplane;
break;
case IXGBE_DEV_ID_X550EM_X_SFP:
+ case IXGBE_DEV_ID_X550EM_A_SFP:
+ case IXGBE_DEV_ID_X550EM_A_SFP_N:
media_type = ixgbe_media_type_fiber;
break;
case IXGBE_DEV_ID_X550EM_X_1G_T:
case IXGBE_DEV_ID_X550EM_X_10G_T:
- media_type = ixgbe_media_type_copper;
+ media_type = ixgbe_media_type_copper;
break;
default:
media_type = ixgbe_media_type_unknown;
@@ -2080,6 +2504,27 @@ static s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw)
return status;
}
+/**
+ * ixgbe_set_mdio_speed - Set MDIO clock speed
+ * @hw: pointer to hardware structure
+ */
+static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw)
+{
+ u32 hlreg0;
+
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_X550EM_X_10G_T:
+ case IXGBE_DEV_ID_X550EM_A_SFP:
+ /* Config MDIO clock speed before the first MDIO PHY access */
+ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
+ hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
+ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
+ break;
+ default:
+ break;
+ }
+}
+
/** ixgbe_reset_hw_X550em - Perform hardware reset
** @hw: pointer to hardware structure
**
@@ -2093,7 +2538,6 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
s32 status;
u32 ctrl = 0;
u32 i;
- u32 hlreg0;
bool link_up = false;
/* Call adapter stop to disable Tx/Rx and clear interrupts */
@@ -2179,11 +2623,7 @@ mac_reset_top:
hw->mac.num_rar_entries = 128;
hw->mac.ops.init_rx_addrs(hw);
- if (hw->device_id == IXGBE_DEV_ID_X550EM_X_10G_T) {
- hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
- hlreg0 &= ~IXGBE_HLREG0_MDCSPD;
- IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
- }
+ ixgbe_set_mdio_speed(hw);
if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP)
ixgbe_setup_mux_ctl(hw);
@@ -2206,9 +2646,9 @@ static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
if (enable)
- pfvfspoof |= (1 << vf_target_shift);
+ pfvfspoof |= BIT(vf_target_shift);
else
- pfvfspoof &= ~(1 << vf_target_shift);
+ pfvfspoof &= ~BIT(vf_target_shift);
IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
}
@@ -2296,6 +2736,110 @@ static void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
ixgbe_release_swfw_sync_X540(hw, mask);
}
+/**
+ * ixgbe_acquire_swfw_sync_x550em_a - Acquire SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to acquire
+ *
+ * Acquires the SWFW semaphore and gets the shared PHY token as needed
+ */
+static s32 ixgbe_acquire_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
+{
+ u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
+ int retries = FW_PHY_TOKEN_RETRIES;
+ s32 status;
+
+ while (--retries) {
+ status = 0;
+ if (hmask)
+ status = ixgbe_acquire_swfw_sync_X540(hw, hmask);
+ if (status)
+ return status;
+ if (!(mask & IXGBE_GSSR_TOKEN_SM))
+ return 0;
+
+ status = ixgbe_get_phy_token(hw);
+ if (!status)
+ return 0;
+ if (hmask)
+ ixgbe_release_swfw_sync_X540(hw, hmask);
+ if (status != IXGBE_ERR_TOKEN_RETRY)
+ return status;
+ msleep(FW_PHY_TOKEN_DELAY);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_release_swfw_sync_x550em_a - Release SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to release
+ *
+ * Releases the SWFW semaphore and puts back the shared PHY token as needed
+ */
+static void ixgbe_release_swfw_sync_x550em_a(struct ixgbe_hw *hw, u32 mask)
+{
+ u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM;
+
+ if (mask & IXGBE_GSSR_TOKEN_SM)
+ ixgbe_put_phy_token(hw);
+
+ if (hmask)
+ ixgbe_release_swfw_sync_X540(hw, hmask);
+}
+
+/**
+ * ixgbe_read_phy_reg_x550a - Reads specified PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit address of PHY register to read
+ * @device_type: 5 bit device type
+ * @phy_data: Pointer to read data from PHY register
+ *
+ * Reads a value from a specified PHY register using the SWFW lock and PHY
+ * Token. The PHY Token is needed since the MDIO is shared between two MAC
+ * instances.
+ */
+static s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 *phy_data)
+{
+ u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
+ s32 status;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, mask))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data);
+
+ hw->mac.ops.release_swfw_sync(hw, mask);
+
+ return status;
+}
+
+/**
+ * ixgbe_write_phy_reg_x550a - Writes specified PHY register
+ * @hw: pointer to hardware structure
+ * @reg_addr: 32 bit PHY register to write
+ * @device_type: 5 bit device type
+ * @phy_data: Data to write to the PHY register
+ *
+ * Writes a value to a specified PHY register using the SWFW lock and PHY
+ * Token. The PHY Token is needed since the MDIO is shared between two MAC
+ * instances.
+ */
+static s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr,
+ u32 device_type, u16 phy_data)
+{
+ u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM;
+ s32 status;
+
+ if (hw->mac.ops.acquire_swfw_sync(hw, mask))
+ return IXGBE_ERR_SWFW_SYNC;
+
+ status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type, phy_data);
+ hw->mac.ops.release_swfw_sync(hw, mask);
+
+ return status;
+}
+
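A hypothetical caller sketch for the locked accessors above; MY_PHY_REG and MY_DEV_TYPE are placeholders, not real driver defines. Because the locking lives inside the ops, callers never take the SWFW semaphore or PHY token directly:

	u16 val;
	s32 err;

	err = hw->phy.ops.read_reg(hw, MY_PHY_REG, MY_DEV_TYPE, &val);
	if (!err)
		err = hw->phy.ops.write_reg(hw, MY_PHY_REG, MY_DEV_TYPE,
					    val | BIT(0));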
#define X550_COMMON_MAC \
.init_hw = &ixgbe_init_hw_generic, \
.start_hw = &ixgbe_start_hw_X540, \
@@ -2337,12 +2881,10 @@ static void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask)
.enable_rx_buff = &ixgbe_enable_rx_buff_generic, \
.get_thermal_sensor_data = NULL, \
.init_thermal_sensor_thresh = NULL, \
- .prot_autoc_read = &prot_autoc_read_generic, \
- .prot_autoc_write = &prot_autoc_write_generic, \
.enable_rx = &ixgbe_enable_rx_generic, \
.disable_rx = &ixgbe_disable_rx_x550, \
-static struct ixgbe_mac_operations mac_ops_X550 = {
+static const struct ixgbe_mac_operations mac_ops_X550 = {
X550_COMMON_MAC
.reset_hw = &ixgbe_reset_hw_X540,
.get_media_type = &ixgbe_get_media_type_X540,
@@ -2354,20 +2896,45 @@ static struct ixgbe_mac_operations mac_ops_X550 = {
.setup_sfp = NULL,
.acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540,
.release_swfw_sync = &ixgbe_release_swfw_sync_X540,
+ .init_swfw_sync = &ixgbe_init_swfw_sync_X540,
+ .prot_autoc_read = prot_autoc_read_generic,
+ .prot_autoc_write = prot_autoc_write_generic,
+ .setup_fc = ixgbe_setup_fc_generic,
};
-static struct ixgbe_mac_operations mac_ops_X550EM_x = {
+static const struct ixgbe_mac_operations mac_ops_X550EM_x = {
X550_COMMON_MAC
.reset_hw = &ixgbe_reset_hw_X550em,
.get_media_type = &ixgbe_get_media_type_X550em,
.get_san_mac_addr = NULL,
.get_wwn_prefix = NULL,
- .setup_link = NULL, /* defined later */
+ .setup_link = &ixgbe_setup_mac_link_X540,
.get_link_capabilities = &ixgbe_get_link_capabilities_X550em,
.get_bus_info = &ixgbe_get_bus_info_X550em,
.setup_sfp = ixgbe_setup_sfp_modules_X550em,
.acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X550em,
.release_swfw_sync = &ixgbe_release_swfw_sync_X550em,
+ .init_swfw_sync = &ixgbe_init_swfw_sync_X540,
+ .setup_fc = NULL, /* defined later */
+ .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550,
+ .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550,
+};
+
+static struct ixgbe_mac_operations mac_ops_x550em_a = {
+ X550_COMMON_MAC
+ .reset_hw = ixgbe_reset_hw_X550em,
+ .get_media_type = ixgbe_get_media_type_X550em,
+ .get_san_mac_addr = NULL,
+ .get_wwn_prefix = NULL,
+ .setup_link = NULL, /* defined later */
+ .get_link_capabilities = ixgbe_get_link_capabilities_X550em,
+ .get_bus_info = ixgbe_get_bus_info_X550em,
+ .setup_sfp = ixgbe_setup_sfp_modules_X550em,
+ .acquire_swfw_sync = ixgbe_acquire_swfw_sync_x550em_a,
+ .release_swfw_sync = ixgbe_release_swfw_sync_x550em_a,
+ .setup_fc = ixgbe_setup_fc_x550em,
+ .read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a,
+ .write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a,
};
#define X550_COMMON_EEP \
@@ -2379,12 +2946,12 @@ static struct ixgbe_mac_operations mac_ops_X550EM_x = {
.update_checksum = &ixgbe_update_eeprom_checksum_X550, \
.calc_checksum = &ixgbe_calc_eeprom_checksum_X550, \
-static struct ixgbe_eeprom_operations eeprom_ops_X550 = {
+static const struct ixgbe_eeprom_operations eeprom_ops_X550 = {
X550_COMMON_EEP
.init_params = &ixgbe_init_eeprom_params_X550,
};
-static struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = {
+static const struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = {
X550_COMMON_EEP
.init_params = &ixgbe_init_eeprom_params_X540,
};
@@ -2398,23 +2965,25 @@ static struct ixgbe_eeprom_operations eeprom_ops_X550EM_x = {
.read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, \
.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, \
.write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, \
- .read_reg = &ixgbe_read_phy_reg_generic, \
- .write_reg = &ixgbe_write_phy_reg_generic, \
.setup_link = &ixgbe_setup_phy_link_generic, \
.set_phy_power = NULL, \
.check_overtemp = &ixgbe_tn_check_overtemp, \
.get_firmware_version = &ixgbe_get_phy_firmware_version_generic,
-static struct ixgbe_phy_operations phy_ops_X550 = {
+static const struct ixgbe_phy_operations phy_ops_X550 = {
X550_COMMON_PHY
.init = NULL,
.identify = &ixgbe_identify_phy_generic,
+ .read_reg = &ixgbe_read_phy_reg_generic,
+ .write_reg = &ixgbe_write_phy_reg_generic,
};
-static struct ixgbe_phy_operations phy_ops_X550EM_x = {
+static const struct ixgbe_phy_operations phy_ops_X550EM_x = {
X550_COMMON_PHY
.init = &ixgbe_init_phy_ops_X550em,
.identify = &ixgbe_identify_phy_x550em,
+ .read_reg = &ixgbe_read_phy_reg_generic,
+ .write_reg = &ixgbe_write_phy_reg_generic,
.read_i2c_combined = &ixgbe_read_i2c_combined_generic,
.write_i2c_combined = &ixgbe_write_i2c_combined_generic,
.read_i2c_combined_unlocked = &ixgbe_read_i2c_combined_generic_unlocked,
@@ -2422,6 +2991,14 @@ static struct ixgbe_phy_operations phy_ops_X550EM_x = {
&ixgbe_write_i2c_combined_generic_unlocked,
};
+static const struct ixgbe_phy_operations phy_ops_x550em_a = {
+ X550_COMMON_PHY
+ .init = &ixgbe_init_phy_ops_X550em,
+ .identify = &ixgbe_identify_phy_x550em,
+ .read_reg = &ixgbe_read_phy_reg_x550a,
+ .write_reg = &ixgbe_write_phy_reg_x550a,
+};
+
static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = {
IXGBE_MVALS_INIT(X550)
};
@@ -2430,7 +3007,11 @@ static const u32 ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = {
IXGBE_MVALS_INIT(X550EM_x)
};
-struct ixgbe_info ixgbe_X550_info = {
+static const u32 ixgbe_mvals_x550em_a[IXGBE_MVALS_IDX_LIMIT] = {
+ IXGBE_MVALS_INIT(X550EM_a)
+};
+
+const struct ixgbe_info ixgbe_X550_info = {
.mac = ixgbe_mac_X550,
.get_invariants = &ixgbe_get_invariants_X540,
.mac_ops = &mac_ops_X550,
@@ -2440,7 +3021,7 @@ struct ixgbe_info ixgbe_X550_info = {
.mvals = ixgbe_mvals_X550,
};
-struct ixgbe_info ixgbe_X550EM_x_info = {
+const struct ixgbe_info ixgbe_X550EM_x_info = {
.mac = ixgbe_mac_X550EM_x,
.get_invariants = &ixgbe_get_invariants_X550_x,
.mac_ops = &mac_ops_X550EM_x,
@@ -2449,3 +3030,13 @@ struct ixgbe_info ixgbe_X550EM_x_info = {
.mbx_ops = &mbx_ops_generic,
.mvals = ixgbe_mvals_X550EM_x,
};
+
+const struct ixgbe_info ixgbe_x550em_a_info = {
+ .mac = ixgbe_mac_x550em_a,
+ .get_invariants = &ixgbe_get_invariants_X550_x,
+ .mac_ops = &mac_ops_x550em_a,
+ .eeprom_ops = &eeprom_ops_X550EM_x,
+ .phy_ops = &phy_ops_x550em_a,
+ .mbx_ops = &mbx_ops_generic,
+ .mvals = ixgbe_mvals_x550em_a,
+};
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 58434584b16d..8617cae2f801 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -32,6 +32,12 @@
#define IXGBE_DEV_ID_X540_VF 0x1515
#define IXGBE_DEV_ID_X550_VF 0x1565
#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
+#define IXGBE_DEV_ID_X550EM_A_VF 0x15C5
+
+#define IXGBE_DEV_ID_82599_VF_HV 0x152E
+#define IXGBE_DEV_ID_X540_VF_HV 0x1530
+#define IXGBE_DEV_ID_X550_VF_HV 0x1564
+#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9
#define IXGBE_VF_IRQ_CLEAR_MASK 7
#define IXGBE_VF_MAX_TX_QUEUES 8
@@ -74,7 +80,7 @@ typedef u32 ixgbe_link_speed;
#define IXGBE_RXDCTL_RLPML_EN 0x00008000
/* DCA Control */
-#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN BIT(11) /* Tx Desc writeback RO bit */
/* PSRTYPE bit definitions */
#define IXGBE_PSRTYPE_TCPHDR 0x00000010
@@ -296,16 +302,16 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wr-bk flushing */
#define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */
-#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* Rx Desc enable */
-#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* Rx Desc header ena */
-#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* Rx Desc payload ena */
-#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* Rx rd Desc Relax Order */
-#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */
-#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */
-
-#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
-#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
-#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */
-#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
+#define IXGBE_DCA_RXCTRL_DESC_DCA_EN BIT(5) /* Rx Desc enable */
+#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN BIT(6) /* Rx Desc header ena */
+#define IXGBE_DCA_RXCTRL_DATA_DCA_EN BIT(7) /* Rx Desc payload ena */
+#define IXGBE_DCA_RXCTRL_DESC_RRO_EN BIT(9) /* Rx rd Desc Relax Order */
+#define IXGBE_DCA_RXCTRL_DATA_WRO_EN BIT(13) /* Rx wr data Relax Order */
+#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN BIT(15) /* Rx wr header RO */
+
+#define IXGBE_DCA_TXCTRL_DESC_DCA_EN BIT(5) /* DCA Tx Desc enable */
+#define IXGBE_DCA_TXCTRL_DESC_RRO_EN BIT(9) /* Tx rd Desc Relax Order */
+#define IXGBE_DCA_TXCTRL_DESC_WRO_EN BIT(11) /* Tx Desc writeback RO bit */
+#define IXGBE_DCA_TXCTRL_DATA_RRO_EN BIT(13) /* Tx rd data Relax Order */
#endif /* _IXGBEVF_DEFINES_H_ */
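The BIT() conversions in this header are mechanical: BIT(n) is the kernel's (1UL << (n)) from linux/bitops.h, so the values are unchanged while the intent is clearer and the shift is done on an unsigned operand, which avoids undefined behavior when the same pattern is used for bit 31.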
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index d7aa4b203f40..508e72c5f1c2 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -42,65 +42,54 @@
#define IXGBE_ALL_RAR_ENTRIES 16
+enum {NETDEV_STATS, IXGBEVF_STATS};
+
struct ixgbe_stats {
char stat_string[ETH_GSTRING_LEN];
- struct {
- int sizeof_stat;
- int stat_offset;
- int base_stat_offset;
- int saved_reset_offset;
- };
+ int type;
+ int sizeof_stat;
+ int stat_offset;
};
-#define IXGBEVF_STAT(m, b, r) { \
- .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \
- .stat_offset = offsetof(struct ixgbevf_adapter, m), \
- .base_stat_offset = offsetof(struct ixgbevf_adapter, b), \
- .saved_reset_offset = offsetof(struct ixgbevf_adapter, r) \
+#define IXGBEVF_STAT(_name, _stat) { \
+ .stat_string = _name, \
+ .type = IXGBEVF_STATS, \
+ .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, _stat), \
+ .stat_offset = offsetof(struct ixgbevf_adapter, _stat) \
}
-#define IXGBEVF_ZSTAT(m) { \
- .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, m), \
- .stat_offset = offsetof(struct ixgbevf_adapter, m), \
- .base_stat_offset = -1, \
- .saved_reset_offset = -1 \
+#define IXGBEVF_NETDEV_STAT(_net_stat) { \
+ .stat_string = #_net_stat, \
+ .type = NETDEV_STATS, \
+ .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
+ .stat_offset = offsetof(struct net_device_stats, _net_stat) \
}
-static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
- {"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc,
- stats.saved_reset_vfgprc)},
- {"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc,
- stats.saved_reset_vfgptc)},
- {"rx_bytes", IXGBEVF_STAT(stats.vfgorc, stats.base_vfgorc,
- stats.saved_reset_vfgorc)},
- {"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc,
- stats.saved_reset_vfgotc)},
- {"tx_busy", IXGBEVF_ZSTAT(tx_busy)},
- {"tx_restart_queue", IXGBEVF_ZSTAT(restart_queue)},
- {"tx_timeout_count", IXGBEVF_ZSTAT(tx_timeout_count)},
- {"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc,
- stats.saved_reset_vfmprc)},
- {"rx_csum_offload_errors", IXGBEVF_ZSTAT(hw_csum_rx_error)},
-#ifdef BP_EXTENDED_STATS
- {"rx_bp_poll_yield", IXGBEVF_ZSTAT(bp_rx_yields)},
- {"rx_bp_cleaned", IXGBEVF_ZSTAT(bp_rx_cleaned)},
- {"rx_bp_misses", IXGBEVF_ZSTAT(bp_rx_missed)},
- {"tx_bp_napi_yield", IXGBEVF_ZSTAT(bp_tx_yields)},
- {"tx_bp_cleaned", IXGBEVF_ZSTAT(bp_tx_cleaned)},
- {"tx_bp_misses", IXGBEVF_ZSTAT(bp_tx_missed)},
-#endif
+static struct ixgbe_stats ixgbevf_gstrings_stats[] = {
+ IXGBEVF_NETDEV_STAT(rx_packets),
+ IXGBEVF_NETDEV_STAT(tx_packets),
+ IXGBEVF_NETDEV_STAT(rx_bytes),
+ IXGBEVF_NETDEV_STAT(tx_bytes),
+ IXGBEVF_STAT("tx_busy", tx_busy),
+ IXGBEVF_STAT("tx_restart_queue", restart_queue),
+ IXGBEVF_STAT("tx_timeout_count", tx_timeout_count),
+ IXGBEVF_NETDEV_STAT(multicast),
+ IXGBEVF_STAT("rx_csum_offload_errors", hw_csum_rx_error),
};
-#define IXGBE_QUEUE_STATS_LEN 0
-#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
+#define IXGBEVF_QUEUE_STATS_LEN ( \
+ (((struct ixgbevf_adapter *)netdev_priv(netdev))->num_tx_queues + \
+ ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_rx_queues) * \
+ (sizeof(struct ixgbe_stats) / sizeof(u64)))
+#define IXGBEVF_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbevf_gstrings_stats)
-#define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
+#define IXGBEVF_STATS_LEN (IXGBEVF_GLOBAL_STATS_LEN + IXGBEVF_QUEUE_STATS_LEN)
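The per-queue stats count implied by IXGBEVF_QUEUE_STATS_LEN corresponds to what the functions below actually emit: each Tx and Rx ring contributes a packets and a bytes counter, plus three busy-poll counters (yields, misses, cleaned) when BP_EXTENDED_STATS is defined, and ixgbevf_get_strings() generates matching tx_queue_N_*/rx_queue_N_* names.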
static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
"Register test (offline)",
"Link test (on/offline)"
};
-#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
+#define IXGBEVF_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
static int ixgbevf_get_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
@@ -177,7 +166,8 @@ static void ixgbevf_get_regs(struct net_device *netdev,
memset(p, 0, regs_len);
- regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
+ /* generate a number suitable for ethtool's register version */
+ regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id;
/* General Registers */
regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL);
@@ -392,13 +382,13 @@ clear_reset:
return err;
}
-static int ixgbevf_get_sset_count(struct net_device *dev, int stringset)
+static int ixgbevf_get_sset_count(struct net_device *netdev, int stringset)
{
switch (stringset) {
case ETH_SS_TEST:
- return IXGBE_TEST_LEN;
+ return IXGBEVF_TEST_LEN;
case ETH_SS_STATS:
- return IXGBE_GLOBAL_STATS_LEN;
+ return IXGBEVF_STATS_LEN;
default:
return -EINVAL;
}
@@ -408,70 +398,138 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- char *base = (char *)adapter;
- int i;
-#ifdef BP_EXTENDED_STATS
- u64 rx_yields = 0, rx_cleaned = 0, rx_missed = 0,
- tx_yields = 0, tx_cleaned = 0, tx_missed = 0;
+ struct rtnl_link_stats64 temp;
+ const struct rtnl_link_stats64 *net_stats;
+ unsigned int start;
+ struct ixgbevf_ring *ring;
+ int i, j;
+ char *p;
- for (i = 0; i < adapter->num_rx_queues; i++) {
- rx_yields += adapter->rx_ring[i]->stats.yields;
- rx_cleaned += adapter->rx_ring[i]->stats.cleaned;
- rx_yields += adapter->rx_ring[i]->stats.yields;
- }
+ ixgbevf_update_stats(adapter);
+ net_stats = dev_get_stats(netdev, &temp);
+ for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
+ switch (ixgbevf_gstrings_stats[i].type) {
+ case NETDEV_STATS:
+ p = (char *)net_stats +
+ ixgbevf_gstrings_stats[i].stat_offset;
+ break;
+ case IXGBEVF_STATS:
+ p = (char *)adapter +
+ ixgbevf_gstrings_stats[i].stat_offset;
+ break;
+ default:
+ data[i] = 0;
+ continue;
+ }
- for (i = 0; i < adapter->num_tx_queues; i++) {
- tx_yields += adapter->tx_ring[i]->stats.yields;
- tx_cleaned += adapter->tx_ring[i]->stats.cleaned;
- tx_yields += adapter->tx_ring[i]->stats.yields;
+ data[i] = (ixgbevf_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
- adapter->bp_rx_yields = rx_yields;
- adapter->bp_rx_cleaned = rx_cleaned;
- adapter->bp_rx_missed = rx_missed;
+ /* populate Tx queue data */
+ for (j = 0; j < adapter->num_tx_queues; j++) {
+ ring = adapter->tx_ring[j];
+ if (!ring) {
+ data[i++] = 0;
+ data[i++] = 0;
+#ifdef BP_EXTENDED_STATS
+ data[i++] = 0;
+ data[i++] = 0;
+ data[i++] = 0;
+#endif
+ continue;
+ }
- adapter->bp_tx_yields = tx_yields;
- adapter->bp_tx_cleaned = tx_cleaned;
- adapter->bp_tx_missed = tx_missed;
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
+ data[i] = ring->stats.packets;
+ data[i + 1] = ring->stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ i += 2;
+#ifdef BP_EXTENDED_STATS
+ data[i] = ring->stats.yields;
+ data[i + 1] = ring->stats.misses;
+ data[i + 2] = ring->stats.cleaned;
+ i += 3;
#endif
+ }
- ixgbevf_update_stats(adapter);
- for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
- char *p = base + ixgbe_gstrings_stats[i].stat_offset;
- char *b = base + ixgbe_gstrings_stats[i].base_stat_offset;
- char *r = base + ixgbe_gstrings_stats[i].saved_reset_offset;
-
- if (ixgbe_gstrings_stats[i].sizeof_stat == sizeof(u64)) {
- if (ixgbe_gstrings_stats[i].base_stat_offset >= 0)
- data[i] = *(u64 *)p - *(u64 *)b + *(u64 *)r;
- else
- data[i] = *(u64 *)p;
- } else {
- if (ixgbe_gstrings_stats[i].base_stat_offset >= 0)
- data[i] = *(u32 *)p - *(u32 *)b + *(u32 *)r;
- else
- data[i] = *(u32 *)p;
+ /* populate Rx queue data */
+ for (j = 0; j < adapter->num_rx_queues; j++) {
+ ring = adapter->rx_ring[j];
+ if (!ring) {
+ data[i++] = 0;
+ data[i++] = 0;
+#ifdef BP_EXTENDED_STATS
+ data[i++] = 0;
+ data[i++] = 0;
+ data[i++] = 0;
+#endif
+ continue;
}
+
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
+ data[i] = ring->stats.packets;
+ data[i + 1] = ring->stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ i += 2;
+#ifdef BP_EXTENDED_STATS
+ data[i] = ring->stats.yields;
+ data[i + 1] = ring->stats.misses;
+ data[i + 2] = ring->stats.cleaned;
+ i += 3;
+#endif
}
}
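The u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() pairs above are the kernel's standard seqcount read protocol: if the ring's writer updated packets/bytes while the reader was copying them, the retry check fails and the snapshot is re-read, which is what makes these 64-bit counters safe to read without a lock on 32-bit hosts.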
static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
u8 *data)
{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
char *p = (char *)data;
int i;
switch (stringset) {
case ETH_SS_TEST:
memcpy(data, *ixgbe_gstrings_test,
- IXGBE_TEST_LEN * ETH_GSTRING_LEN);
+ IXGBEVF_TEST_LEN * ETH_GSTRING_LEN);
break;
case ETH_SS_STATS:
- for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
- memcpy(p, ixgbe_gstrings_stats[i].stat_string,
+ for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, ixgbevf_gstrings_stats[i].stat_string,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ sprintf(p, "tx_queue_%u_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_queue_%u_bytes", i);
+ p += ETH_GSTRING_LEN;
+#ifdef BP_EXTENDED_STATS
+ sprintf(p, "tx_queue_%u_bp_napi_yield", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_queue_%u_bp_misses", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_queue_%u_bp_cleaned", i);
+ p += ETH_GSTRING_LEN;
+#endif /* BP_EXTENDED_STATS */
+ }
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ sprintf(p, "rx_queue_%u_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_bytes", i);
+ p += ETH_GSTRING_LEN;
+#ifdef BP_EXTENDED_STATS
+ sprintf(p, "rx_queue_%u_bp_poll_yield", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_bp_misses", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_bp_cleaned", i);
+ p += ETH_GSTRING_LEN;
+#endif /* BP_EXTENDED_STATS */
+ }
break;
}
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 991eeae81473..be52f597688b 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -166,10 +166,10 @@ struct ixgbevf_ring {
#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
-#define IXGBE_TX_FLAGS_CSUM (u32)(1)
-#define IXGBE_TX_FLAGS_VLAN (u32)(1 << 1)
-#define IXGBE_TX_FLAGS_TSO (u32)(1 << 2)
-#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 3)
+#define IXGBE_TX_FLAGS_CSUM BIT(0)
+#define IXGBE_TX_FLAGS_VLAN BIT(1)
+#define IXGBE_TX_FLAGS_TSO BIT(2)
+#define IXGBE_TX_FLAGS_IPV4 BIT(3)
#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
@@ -403,13 +403,6 @@ struct ixgbevf_adapter {
u32 alloc_rx_page_failed;
u32 alloc_rx_buff_failed;
- /* Some features need tri-state capability,
- * thus the additional *_CAPABLE flags.
- */
- u32 flags;
-#define IXGBEVF_FLAG_RESET_REQUESTED (u32)(1)
-#define IXGBEVF_FLAG_QUEUE_RESET_REQUESTED (u32)(1 << 2)
-
struct msix_entry *msix_entries;
/* OS defined structs */
@@ -429,16 +422,6 @@ struct ixgbevf_adapter {
unsigned int tx_ring_count;
unsigned int rx_ring_count;
-#ifdef BP_EXTENDED_STATS
- u64 bp_rx_yields;
- u64 bp_rx_cleaned;
- u64 bp_rx_missed;
-
- u64 bp_tx_yields;
- u64 bp_tx_cleaned;
- u64 bp_tx_missed;
-#endif
-
u8 __iomem *io_addr; /* Mainly for iounmap use */
u32 link_speed;
bool link_up;
@@ -461,13 +444,20 @@ enum ixbgevf_state_t {
__IXGBEVF_REMOVING,
__IXGBEVF_SERVICE_SCHED,
__IXGBEVF_SERVICE_INITED,
+ __IXGBEVF_RESET_REQUESTED,
+ __IXGBEVF_QUEUE_RESET_REQUESTED,
};
enum ixgbevf_boards {
board_82599_vf,
+ board_82599_vf_hv,
board_X540_vf,
+ board_X540_vf_hv,
board_X550_vf,
+ board_X550_vf_hv,
board_X550EM_x_vf,
+ board_X550EM_x_vf_hv,
+ board_x550em_a_vf,
};
enum ixgbevf_xcast_modes {
@@ -481,6 +471,13 @@ extern const struct ixgbevf_info ixgbevf_X540_vf_info;
extern const struct ixgbevf_info ixgbevf_X550_vf_info;
extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info;
extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
+extern const struct ixgbevf_info ixgbevf_x550em_a_vf_info;
+
+extern const struct ixgbevf_info ixgbevf_82599_vf_hv_info;
+extern const struct ixgbevf_info ixgbevf_X540_vf_hv_info;
+extern const struct ixgbevf_info ixgbevf_X550_vf_hv_info;
+extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info;
+extern const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops;
/* needed by ethtool.c */
extern const char ixgbevf_driver_name[];
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index b0edae94d73d..d9d6616f02a4 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -56,16 +56,21 @@ const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
-#define DRV_VERSION "2.12.1-k"
+#define DRV_VERSION "3.2.2-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
"Copyright (c) 2009 - 2015 Intel Corporation.";
static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
- [board_82599_vf] = &ixgbevf_82599_vf_info,
- [board_X540_vf] = &ixgbevf_X540_vf_info,
- [board_X550_vf] = &ixgbevf_X550_vf_info,
- [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
+ [board_82599_vf] = &ixgbevf_82599_vf_info,
+ [board_82599_vf_hv] = &ixgbevf_82599_vf_hv_info,
+ [board_X540_vf] = &ixgbevf_X540_vf_info,
+ [board_X540_vf_hv] = &ixgbevf_X540_vf_hv_info,
+ [board_X550_vf] = &ixgbevf_X550_vf_info,
+ [board_X550_vf_hv] = &ixgbevf_X550_vf_hv_info,
+ [board_X550EM_x_vf] = &ixgbevf_X550EM_x_vf_info,
+ [board_X550EM_x_vf_hv] = &ixgbevf_X550EM_x_vf_hv_info,
+ [board_x550em_a_vf] = &ixgbevf_x550em_a_vf_info,
};
/* ixgbevf_pci_tbl - PCI Device ID Table
@@ -78,9 +83,14 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
*/
static const struct pci_device_id ixgbevf_pci_tbl[] = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), board_82599_vf },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF_HV), board_82599_vf_hv },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF), board_X540_vf },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF_HV), board_X540_vf_hv },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF), board_X550_vf },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550_VF_HV), board_X550_vf_hv },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF), board_X550EM_x_vf },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_VF_HV), board_X550EM_x_vf_hv},
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_VF), board_x550em_a_vf },
/* required last entry */
{0, }
};
@@ -268,7 +278,7 @@ static void ixgbevf_tx_timeout_reset(struct ixgbevf_adapter *adapter)
{
/* Do the reset outside of interrupt context */
if (!test_bit(__IXGBEVF_DOWN, &adapter->state)) {
- adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED;
+ set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
ixgbevf_service_event_schedule(adapter);
}
}
@@ -288,9 +298,10 @@ static void ixgbevf_tx_timeout(struct net_device *netdev)
* ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
* @q_vector: board private structure
* @tx_ring: tx ring to clean
+ * @napi_budget: Used to determine if we are in netpoll
**/
static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
- struct ixgbevf_ring *tx_ring)
+ struct ixgbevf_ring *tx_ring, int napi_budget)
{
struct ixgbevf_adapter *adapter = q_vector->adapter;
struct ixgbevf_tx_buffer *tx_buffer;
@@ -328,7 +339,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
total_packets += tx_buffer->gso_segs;
/* free the skb */
- dev_kfree_skb_any(tx_buffer->skb);
+ napi_consume_skb(tx_buffer->skb, napi_budget);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -1013,8 +1024,10 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
int per_ring_budget, work_done = 0;
bool clean_complete = true;
- ixgbevf_for_each_ring(ring, q_vector->tx)
- clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);
+ ixgbevf_for_each_ring(ring, q_vector->tx) {
+ if (!ixgbevf_clean_tx_irq(q_vector, ring, budget))
+ clean_complete = false;
+ }
if (budget <= 0)
return budget;
@@ -1035,7 +1048,8 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
int cleaned = ixgbevf_clean_rx_irq(q_vector, ring,
per_ring_budget);
work_done += cleaned;
- clean_complete &= (cleaned < per_ring_budget);
+ if (cleaned >= per_ring_budget)
+ clean_complete = false;
}
#ifdef CONFIG_NET_RX_BUSY_POLL
@@ -1052,7 +1066,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
if (!test_bit(__IXGBEVF_DOWN, &adapter->state) &&
!test_bit(__IXGBEVF_REMOVING, &adapter->state))
ixgbevf_irq_enable_queues(adapter,
- 1 << q_vector->v_idx);
+ BIT(q_vector->v_idx));
return 0;
}
@@ -1154,14 +1168,14 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
}
/* add q_vector eims value to global eims_enable_mask */
- adapter->eims_enable_mask |= 1 << v_idx;
+ adapter->eims_enable_mask |= BIT(v_idx);
ixgbevf_write_eitr(q_vector);
}
ixgbevf_set_ivar(adapter, -1, 1, v_idx);
/* setup eims_other and add value to global eims_enable_mask */
- adapter->eims_other = 1 << v_idx;
+ adapter->eims_other = BIT(v_idx);
adapter->eims_enable_mask |= adapter->eims_other;
}
@@ -1585,8 +1599,8 @@ static void ixgbevf_configure_tx_ring(struct ixgbevf_adapter *adapter,
txdctl |= (8 << 16); /* WTHRESH = 8 */
/* Setting PTHRESH to 32 both improves performance */
- txdctl |= (1 << 8) | /* HTHRESH = 1 */
- 32; /* PTHRESH = 32 */
+ txdctl |= (1u << 8) | /* HTHRESH = 1 */
+ 32; /* PTHRESH = 32 */
clear_bit(__IXGBEVF_HANG_CHECK_ARMED, &ring->state);
@@ -1642,7 +1656,7 @@ static void ixgbevf_setup_psrtype(struct ixgbevf_adapter *adapter)
IXGBE_PSRTYPE_L2HDR;
if (adapter->num_rx_queues > 1)
- psrtype |= 1 << 29;
+ psrtype |= BIT(29);
IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
}
@@ -1748,9 +1762,15 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(reg_idx),
ring->count * sizeof(union ixgbe_adv_rx_desc));
+#ifndef CONFIG_SPARC
/* enable relaxed ordering */
IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
IXGBE_DCA_RXCTRL_DESC_RRO_EN);
+#else
+ IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(reg_idx),
+ IXGBE_DCA_RXCTRL_DESC_RRO_EN |
+ IXGBE_DCA_RXCTRL_DATA_WRO_EN);
+#endif
/* reset head and tail pointers */
IXGBE_WRITE_REG(hw, IXGBE_VFRDH(reg_idx), 0);
@@ -1782,16 +1802,19 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
**/
static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
{
- int i;
struct ixgbe_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
+ int i, ret;
ixgbevf_setup_psrtype(adapter);
if (hw->mac.type >= ixgbe_mac_X550_vf)
ixgbevf_setup_vfmrqc(adapter);
/* notify the PF of our intent to use this size of frame */
- ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
+ ret = hw->mac.ops.set_rlpml(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
+ if (ret)
+ dev_err(&adapter->pdev->dev,
+ "Failed to set MTU at %d\n", netdev->mtu);
/* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring
@@ -1904,7 +1927,7 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
spin_lock_bh(&adapter->mbx_lock);
- hw->mac.ops.update_xcast_mode(hw, netdev, xcast_mode);
+ hw->mac.ops.update_xcast_mode(hw, xcast_mode);
/* reprogram multicast list */
hw->mac.ops.update_mc_addr_list(hw, netdev);
@@ -1984,7 +2007,7 @@ static int ixgbevf_configure_dcb(struct ixgbevf_adapter *adapter)
hw->mbx.timeout = 0;
/* wait for watchdog to come around and bail us out */
- adapter->flags |= IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
+ set_bit(__IXGBEVF_QUEUE_RESET_REQUESTED, &adapter->state);
}
return 0;
@@ -2052,7 +2075,7 @@ static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
spin_lock_bh(&adapter->mbx_lock);
while (api[idx] != ixgbe_mbox_api_unknown) {
- err = ixgbevf_negotiate_api_version(hw, api[idx]);
+ err = hw->mac.ops.negotiate_api_version(hw, api[idx]);
if (!err)
break;
idx++;
@@ -2749,19 +2772,20 @@ static void ixgbevf_service_timer(unsigned long data)
static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
{
- if (!(adapter->flags & IXGBEVF_FLAG_RESET_REQUESTED))
+ if (!test_and_clear_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state))
return;
- adapter->flags &= ~IXGBEVF_FLAG_RESET_REQUESTED;
-
/* If we're already down or resetting, just bail */
if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
+ test_bit(__IXGBEVF_REMOVING, &adapter->state) ||
test_bit(__IXGBEVF_RESETTING, &adapter->state))
return;
adapter->tx_timeout_count++;
+ rtnl_lock();
ixgbevf_reinit_locked(adapter);
+ rtnl_unlock();
}
/**
@@ -2795,7 +2819,7 @@ static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
struct ixgbevf_q_vector *qv = adapter->q_vector[i];
if (qv->rx.ring || qv->tx.ring)
- eics |= 1 << i;
+ eics |= BIT(i);
}
/* Cause software interrupt to ensure rings are cleaned */
@@ -2821,7 +2845,7 @@ static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter)
/* if check for link returns error we will need to reset */
if (err && time_after(jiffies, adapter->last_reset + (10 * HZ))) {
- adapter->flags |= IXGBEVF_FLAG_RESET_REQUESTED;
+ set_bit(__IXGBEVF_RESET_REQUESTED, &adapter->state);
link_up = false;
}
@@ -3222,11 +3246,10 @@ static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter)
{
struct net_device *dev = adapter->netdev;
- if (!(adapter->flags & IXGBEVF_FLAG_QUEUE_RESET_REQUESTED))
+ if (!test_and_clear_bit(__IXGBEVF_QUEUE_RESET_REQUESTED,
+ &adapter->state))
return;
- adapter->flags &= ~IXGBEVF_FLAG_QUEUE_RESET_REQUESTED;
-
/* if interface is down do nothing */
if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
test_bit(__IXGBEVF_RESETTING, &adapter->state))
@@ -3271,9 +3294,18 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
struct ixgbevf_tx_buffer *first,
u8 *hdr_len)
{
+ u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
struct sk_buff *skb = first->skb;
- u32 vlan_macip_lens, type_tucmd;
- u32 mss_l4len_idx, l4len;
+ union {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+ } ip;
+ union {
+ struct tcphdr *tcp;
+ unsigned char *hdr;
+ } l4;
+ u32 paylen, l4_offset;
int err;
if (skb->ip_summed != CHECKSUM_PARTIAL)
@@ -3286,49 +3318,53 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
if (err < 0)
return err;
+ ip.hdr = skb_network_header(skb);
+ l4.hdr = skb_checksum_start(skb);
+
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
- if (first->protocol == htons(ETH_P_IP)) {
- struct iphdr *iph = ip_hdr(skb);
-
- iph->tot_len = 0;
- iph->check = 0;
- tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, 0,
- IPPROTO_TCP,
- 0);
+ /* initialize outer IP header fields */
+ if (ip.v4->version == 4) {
+ /* IP header will have to cancel out any data that
+ * is not a part of the outer IP header
+ */
+ ip.v4->check = csum_fold(csum_add(lco_csum(skb),
+ csum_unfold(l4.tcp->check)));
type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
+
+ ip.v4->tot_len = 0;
first->tx_flags |= IXGBE_TX_FLAGS_TSO |
IXGBE_TX_FLAGS_CSUM |
IXGBE_TX_FLAGS_IPV4;
- } else if (skb_is_gso_v6(skb)) {
- ipv6_hdr(skb)->payload_len = 0;
- tcp_hdr(skb)->check =
- ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
- &ipv6_hdr(skb)->daddr,
- 0, IPPROTO_TCP, 0);
+ } else {
+ ip.v6->payload_len = 0;
first->tx_flags |= IXGBE_TX_FLAGS_TSO |
IXGBE_TX_FLAGS_CSUM;
}
- /* compute header lengths */
- l4len = tcp_hdrlen(skb);
- *hdr_len += l4len;
- *hdr_len = skb_transport_offset(skb) + l4len;
+ /* determine offset of inner transport header */
+ l4_offset = l4.hdr - skb->data;
+
+ /* compute length of segmentation header */
+ *hdr_len = (l4.tcp->doff * 4) + l4_offset;
- /* update GSO size and bytecount with header size */
+ /* remove payload length from inner checksum */
+ paylen = skb->len - l4_offset;
+ csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
+
+ /* update gso size and bytecount with header size */
first->gso_segs = skb_shinfo(skb)->gso_segs;
first->bytecount += (first->gso_segs - 1) * *hdr_len;
/* mss_l4len_id: use 1 as index for TSO */
- mss_l4len_idx = l4len << IXGBE_ADVTXD_L4LEN_SHIFT;
+ mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT;
mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
- mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
+ mss_l4len_idx |= (1u << IXGBE_ADVTXD_IDX_SHIFT);
/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
- vlan_macip_lens = skb_network_header_len(skb);
- vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
+ vlan_macip_lens = l4.hdr - ip.hdr;
+ vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
@@ -3337,76 +3373,55 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
return 1;
}
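Two checksum manipulations carry the rewritten TSO path above: for IPv4, ip.v4->check is seeded from lco_csum() plus the unfolded TCP checksum so that local checksum offload cancels out correctly once tot_len is zeroed, and csum_replace_by_diff() then removes the payload length from l4.tcp->check, leaving just the pseudo-header sum, since the hardware re-adds the per-segment payload length for every frame it generates.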
+static inline bool ixgbevf_ipv6_csum_is_sctp(struct sk_buff *skb)
+{
+ unsigned int offset = 0;
+
+ ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);
+
+ return offset == skb_checksum_start_offset(skb);
+}
+
static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
struct ixgbevf_tx_buffer *first)
{
struct sk_buff *skb = first->skb;
u32 vlan_macip_lens = 0;
- u32 mss_l4len_idx = 0;
u32 type_tucmd = 0;
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- u8 l4_hdr = 0;
- __be16 frag_off;
-
- switch (first->protocol) {
- case htons(ETH_P_IP):
- vlan_macip_lens |= skb_network_header_len(skb);
- type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
- l4_hdr = ip_hdr(skb)->protocol;
- break;
- case htons(ETH_P_IPV6):
- vlan_macip_lens |= skb_network_header_len(skb);
- l4_hdr = ipv6_hdr(skb)->nexthdr;
- if (likely(skb_network_header_len(skb) ==
- sizeof(struct ipv6hdr)))
- break;
- ipv6_skip_exthdr(skb, skb_network_offset(skb) +
- sizeof(struct ipv6hdr),
- &l4_hdr, &frag_off);
- if (unlikely(frag_off))
- l4_hdr = NEXTHDR_FRAGMENT;
- break;
- default:
- break;
- }
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ goto no_csum;
- switch (l4_hdr) {
- case IPPROTO_TCP:
- type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
- mss_l4len_idx = tcp_hdrlen(skb) <<
- IXGBE_ADVTXD_L4LEN_SHIFT;
- break;
- case IPPROTO_SCTP:
- type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP;
- mss_l4len_idx = sizeof(struct sctphdr) <<
- IXGBE_ADVTXD_L4LEN_SHIFT;
- break;
- case IPPROTO_UDP:
- mss_l4len_idx = sizeof(struct udphdr) <<
- IXGBE_ADVTXD_L4LEN_SHIFT;
+ switch (skb->csum_offset) {
+ case offsetof(struct tcphdr, check):
+ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ /* fall through */
+ case offsetof(struct udphdr, check):
+ break;
+ case offsetof(struct sctphdr, checksum):
+ /* validate that this is actually an SCTP request */
+ if (((first->protocol == htons(ETH_P_IP)) &&
+ (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
+ ((first->protocol == htons(ETH_P_IPV6)) &&
+ ixgbevf_ipv6_csum_is_sctp(skb))) {
+ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP;
break;
- default:
- if (unlikely(net_ratelimit())) {
- dev_warn(tx_ring->dev,
- "partial checksum, l3 proto=%x, l4 proto=%x\n",
- first->protocol, l4_hdr);
- }
- skb_checksum_help(skb);
- goto no_csum;
}
-
- /* update TX checksum flag */
- first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
+ /* fall through */
+ default:
+ skb_checksum_help(skb);
+ goto no_csum;
}
-
+ /* update TX checksum flag */
+ first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
+ vlan_macip_lens = skb_checksum_start_offset(skb) -
+ skb_network_offset(skb);
no_csum:
/* vlan_macip_lens: MACLEN, VLAN tag */
vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
- ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens,
- type_tucmd, mss_l4len_idx);
+ ixgbevf_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0);
}
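The rewritten ixgbevf_tx_csum() above no longer parses L3/L4 headers at all: skb->csum_offset is enough to tell the protocols apart, because each L4 header keeps its checksum field at a distinct offset. A sketch with simplified header layouts (the kernel uses the real structs from <linux/tcp.h>, <linux/udp.h> and <linux/sctp.h>); the SCTP case still needs the extra validation above because other protocols could share offset 8:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* simplified layouts, for illustration only */
struct tcphdr_s  { uint16_t source, dest; uint32_t seq, ack_seq;
		   uint16_t flags, window, check, urg_ptr; };
struct udphdr_s  { uint16_t source, dest, len, check; };
struct sctphdr_s { uint16_t source, dest; uint32_t vtag, checksum; };

int main(void)
{
	printf("tcp  check at %zu\n", offsetof(struct tcphdr_s, check));     /* 16 */
	printf("udp  check at %zu\n", offsetof(struct udphdr_s, check));     /*  6 */
	printf("sctp check at %zu\n", offsetof(struct sctphdr_s, checksum)); /*  8 */
	return 0;
}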
static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
@@ -3442,7 +3457,7 @@ static void ixgbevf_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
/* use index 1 context for TSO/FSO/FCOE */
if (tx_flags & IXGBE_TX_FLAGS_TSO)
- olinfo_status |= cpu_to_le32(1 << IXGBE_ADVTXD_IDX_SHIFT);
+ olinfo_status |= cpu_to_le32(1u << IXGBE_ADVTXD_IDX_SHIFT);
/* Check Context must be set if Tx switch is enabled, which it
* always is for case where virtual functions are running
@@ -3725,6 +3740,7 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
struct ixgbe_hw *hw = &adapter->hw;
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
int max_possible_frame = MAXIMUM_ETHERNET_VLAN_SIZE;
+ int ret;
switch (adapter->hw.api_version) {
case ixgbe_mbox_api_11:
@@ -3741,14 +3757,17 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
if ((new_mtu < 68) || (max_frame > max_possible_frame))
return -EINVAL;
+ /* notify the PF of our intent to use this size of frame */
+ ret = hw->mac.ops.set_rlpml(hw, max_frame);
+ if (ret)
+ return -EINVAL;
+
hw_dbg(hw, "changing MTU from %d to %d\n",
netdev->mtu, new_mtu);
+
/* must set new MTU before calling down or up */
netdev->mtu = new_mtu;
- /* notify the PF of our intent to use this size of frame */
- ixgbevf_rlpml_set_vf(hw, max_frame);
-
return 0;
}
@@ -3890,6 +3909,40 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
return stats;
}
+#define IXGBEVF_MAX_MAC_HDR_LEN 127
+#define IXGBEVF_MAX_NETWORK_HDR_LEN 511
+
+static netdev_features_t
+ixgbevf_features_check(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features)
+{
+ unsigned int network_hdr_len, mac_hdr_len;
+
+ /* Make certain the headers can be described by a context descriptor */
+ mac_hdr_len = skb_network_header(skb) - skb->data;
+ if (unlikely(mac_hdr_len > IXGBEVF_MAX_MAC_HDR_LEN))
+ return features & ~(NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+
+ network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
+ if (unlikely(network_hdr_len > IXGBEVF_MAX_NETWORK_HDR_LEN))
+ return features & ~(NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC |
+ NETIF_F_TSO |
+ NETIF_F_TSO6);
+
+ /* We can only support IPV4 TSO in tunnels if we can mangle the
+ * inner IP ID field, so strip TSO if MANGLEID is not supported.
+ */
+ if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
+ features &= ~NETIF_F_TSO;
+
+ return features;
+}
+
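ixgbevf_features_check() exists because the advanced context descriptor encodes the MAC and network header lengths in narrow fields; a frame whose headers do not fit must fall back to software checksumming and GSO rather than be described incorrectly to the hardware. A condensed sketch of the masking logic, with made-up flag values standing in for the NETIF_F_* bits:

enum {	/* illustrative stand-ins for NETIF_F_* bits */
	F_CSUM = 1, F_SCTP = 2, F_VLAN_TX = 4, F_TSO = 8,
};

static unsigned int strip_unfit_offloads(unsigned int features,
					 unsigned int mac_hdr_len,
					 unsigned int net_hdr_len)
{
	if (mac_hdr_len > 127)		/* MACLEN descriptor field limit */
		return features & ~(F_CSUM | F_SCTP | F_VLAN_TX | F_TSO);
	if (net_hdr_len > 511)		/* network header field limit */
		return features & ~(F_CSUM | F_SCTP | F_TSO);
	return features;
}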
static const struct net_device_ops ixgbevf_netdev_ops = {
.ndo_open = ixgbevf_open,
.ndo_stop = ixgbevf_close,
@@ -3908,7 +3961,7 @@ static const struct net_device_ops ixgbevf_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ixgbevf_netpoll,
#endif
- .ndo_features_check = passthru_features_check,
+ .ndo_features_check = ixgbevf_features_check,
};
static void ixgbevf_assign_netdev_ops(struct net_device *dev)
@@ -4013,26 +4066,37 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
netdev->hw_features = NETIF_F_SG |
- NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
NETIF_F_TSO |
NETIF_F_TSO6 |
- NETIF_F_RXCSUM;
+ NETIF_F_RXCSUM |
+ NETIF_F_HW_CSUM |
+ NETIF_F_SCTP_CRC;
- netdev->features = netdev->hw_features |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER;
+#define IXGBEVF_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
+ NETIF_F_GSO_GRE_CSUM | \
+ NETIF_F_GSO_IPXIP4 | \
+ NETIF_F_GSO_IPXIP6 | \
+ NETIF_F_GSO_UDP_TUNNEL | \
+ NETIF_F_GSO_UDP_TUNNEL_CSUM)
- netdev->vlan_features |= NETIF_F_TSO |
- NETIF_F_TSO6 |
- NETIF_F_IP_CSUM |
- NETIF_F_IPV6_CSUM |
- NETIF_F_SG;
+ netdev->gso_partial_features = IXGBEVF_GSO_PARTIAL_FEATURES;
+ netdev->hw_features |= NETIF_F_GSO_PARTIAL |
+ IXGBEVF_GSO_PARTIAL_FEATURES;
+
+ netdev->features = netdev->hw_features;
if (pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
+ netdev->mpls_features |= NETIF_F_HW_CSUM;
+ netdev->hw_enc_features |= netdev->vlan_features;
+
+ /* set this bit last since it cannot be part of vlan_features */
+ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX;
+
netdev->priv_flags |= IFF_UNICAST_FLT;
if (IXGBE_REMOVED(hw->hw_addr)) {
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c
index dc68fea4894b..2819abc454c7 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.c
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c
@@ -85,7 +85,7 @@ static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw)
static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val = -IXGBE_ERR_MBX;
+ s32 ret_val = IXGBE_ERR_MBX;
if (!mbx->ops.read)
goto out;
@@ -111,7 +111,7 @@ out:
static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
- s32 ret_val = -IXGBE_ERR_MBX;
+ s32 ret_val = IXGBE_ERR_MBX;
/* exit if either we can't write or there isn't a defined timeout */
if (!mbx->ops.write || !mbx->timeout)
@@ -346,3 +346,14 @@ const struct ixgbe_mbx_operations ixgbevf_mbx_ops = {
.check_for_rst = ixgbevf_check_for_rst_vf,
};
+/* Mailbox operations when running on Hyper-V.
+ * On Hyper-V, PF/VF communication is not through the
+ * hardware mailbox; it goes through a software-mediated
+ * path instead. Most mailbox operations are therefore
+ * no-ops while running on Hyper-V.
+ */
+const struct ixgbe_mbx_operations ixgbevf_hv_mbx_ops = {
+ .init_params = ixgbevf_init_mbx_params_vf,
+ .check_for_rst = ixgbevf_check_for_rst_vf,
+};
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 4d613a4f2a7f..a52f70ec42b6 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -27,6 +27,24 @@
#include "vf.h"
#include "ixgbevf.h"
+/* On Hyper-V, to reset, we need to read from this offset
+ * from the PCI config space. This is the mechanism used on
+ * Hyper-V to support PF/VF communication.
+ */
+#define IXGBE_HV_RESET_OFFSET 0x201
+
+static inline s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg,
+ u32 *retmsg, u16 size)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 retval = mbx->ops.write_posted(hw, msg, size);
+
+ if (retval)
+ return retval;
+
+ return mbx->ops.read_posted(hw, retmsg, size);
+}
+
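This helper collapses the write_posted()/read_posted() pair that previously appeared at every mailbox call site, and it propagates the write error instead of dropping it as the old static ixgbevf_write_msg_read_ack() did. A mock of the calling convention, with stand-in types:

#include <stdint.h>

typedef int32_t s32;
typedef uint32_t u32;
typedef uint16_t u16;

struct mbx_ops {
	s32 (*write_posted)(void *hw, u32 *msg, u16 size);
	s32 (*read_posted)(void *hw, u32 *msg, u16 size);
};

static s32 write_msg_read_ack(void *hw, const struct mbx_ops *ops,
			      u32 *msg, u32 *retmsg, u16 size)
{
	s32 err = ops->write_posted(hw, msg, size);

	/* poll for the PF's reply only if the request went out */
	return err ? err : ops->read_posted(hw, retmsg, size);
}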
/**
* ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
* @hw: pointer to hardware structure
@@ -126,6 +144,27 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
}
/**
+ * Hyper-V variant; the VF/PF communication is through the PCI
+ * config space.
+ */
+static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
+{
+#if IS_ENABLED(CONFIG_PCI_MMCONFIG)
+ struct ixgbevf_adapter *adapter = hw->back;
+ int i;
+
+ for (i = 0; i < 6; i++)
+ pci_read_config_byte(adapter->pdev,
+ (i + IXGBE_HV_RESET_OFFSET),
+ &hw->mac.perm_addr[i]);
+ return 0;
+#else
+ pr_err("PCI_MMCONFIG needs to be enabled for Hyper-V\n");
+ return -EOPNOTSUPP;
+#endif
+}
+
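On Hyper-V the permanent MAC address is published at config-space offset 0x201 instead of being handed over via the mailbox, which is why the reset path is a six-byte config read. A user-space illustration of the same read through sysfs; the device path is an example, and pci_read_config_byte() is the in-kernel equivalent used above:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char mac[6];
	int fd = open("/sys/bus/pci/devices/0000:00:02.0/config", O_RDONLY);

	if (fd < 0 || pread(fd, mac, sizeof(mac), 0x201) != sizeof(mac))
		return 1;
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	close(fd);
	return 0;
}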
+/**
* ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
* @hw: pointer to hardware structure
*
@@ -228,8 +267,7 @@ static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
- u32 msgbuf[3];
+ u32 msgbuf[3], msgbuf_chk;
u8 *msg_addr = (u8 *)(&msgbuf[1]);
s32 ret_val;
@@ -241,23 +279,27 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
*/
msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
+ msgbuf_chk = msgbuf[0];
+
if (addr)
ether_addr_copy(msg_addr, addr);
- ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
- if (!ret_val)
- ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
+ ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 3);
+ if (!ret_val) {
+ msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
- msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
-
- if (!ret_val)
- if (msgbuf[0] ==
- (IXGBE_VF_SET_MACVLAN | IXGBE_VT_MSGTYPE_NACK))
- ret_val = -ENOMEM;
+ if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_NACK))
+ return -ENOMEM;
+ }
return ret_val;
}
+static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
+{
+ return -EOPNOTSUPP;
+}
+
/**
* ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents.
* @adapter: pointer to the port handle
@@ -391,7 +433,6 @@ int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
u32 vmdq)
{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[3];
u8 *msg_addr = (u8 *)(&msgbuf[1]);
s32 ret_val;
@@ -399,10 +440,8 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
memset(msgbuf, 0, sizeof(msgbuf));
msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
ether_addr_copy(msg_addr, addr);
- ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
- if (!ret_val)
- ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
+ ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 3);
msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
@@ -416,15 +455,24 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
return ret_val;
}
-static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw,
- u32 *msg, u16 size)
+/**
+ * ixgbevf_hv_set_rar_vf - set device MAC address Hyper-V variant
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ * @addr: Address to put into receive address register
+ * @vmdq: Unused in this implementation
+ *
+ * We don't really allow setting the device MAC address. However,
+ * if the address being set is the permanent MAC address we will
+ * permit that.
+ **/
+static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
+ u32 vmdq)
{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
- u32 retmsg[IXGBE_VFMAILBOX_SIZE];
- s32 retval = mbx->ops.write_posted(hw, msg, size);
+ if (ether_addr_equal(addr, hw->mac.perm_addr))
+ return 0;
- if (!retval)
- mbx->ops.read_posted(hw, retmsg, size);
+ return -EOPNOTSUPP;
}
/**
@@ -467,23 +515,29 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
}
- ixgbevf_write_msg_read_ack(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
+ ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, IXGBE_VFMAILBOX_SIZE);
return 0;
}
/**
+ * Hyper-V variant - just a stub.
+ */
+static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw,
+ struct net_device *netdev)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
* ixgbevf_update_xcast_mode - Update Multicast mode
* @hw: pointer to the HW structure
- * @netdev: pointer to net device structure
* @xcast_mode: new multicast mode
*
* Updates the Multicast Mode of VF.
**/
-static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw,
- struct net_device *netdev, int xcast_mode)
+static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[2];
s32 err;
@@ -497,11 +551,7 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw,
msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
msgbuf[1] = xcast_mode;
- err = mbx->ops.write_posted(hw, msgbuf, 2);
- if (err)
- return err;
-
- err = mbx->ops.read_posted(hw, msgbuf, 2);
+ err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
if (err)
return err;
@@ -513,6 +563,14 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw,
}
/**
+ * Hyper-V variant - just a stub.
+ */
+static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
* ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
* @hw: pointer to the HW structure
* @vlan: 12 bit VLAN ID
@@ -522,7 +580,6 @@ static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw,
static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on)
{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[2];
s32 err;
@@ -531,11 +588,7 @@ static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
- err = mbx->ops.write_posted(hw, msgbuf, 2);
- if (err)
- goto mbx_err;
-
- err = mbx->ops.read_posted(hw, msgbuf, 2);
+ err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
if (err)
goto mbx_err;
@@ -551,6 +604,15 @@ mbx_err:
}
/**
+ * Hyper-V variant - just a stub.
+ */
+static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on)
+{
+ return -EOPNOTSUPP;
+}
+
+/**
* ixgbevf_setup_mac_link_vf - Setup MAC link settings
* @hw: pointer to hardware structure
* @speed: Unused in this implementation
@@ -656,25 +718,116 @@ out:
}
/**
- * ixgbevf_rlpml_set_vf - Set the maximum receive packet length
+ * Hyper-V variant; there is no mailbox communication.
+ */
+static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *link_up,
+ bool autoneg_wait_to_complete)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ struct ixgbe_mac_info *mac = &hw->mac;
+ u32 links_reg;
+
+ /* If we were hit with a reset drop the link */
+ if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
+ mac->get_link_status = true;
+
+ if (!mac->get_link_status)
+ goto out;
+
+ /* if link status is down no point in checking to see if pf is up */
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+ if (!(links_reg & IXGBE_LINKS_UP))
+ goto out;
+
+ /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
+ * before the link status is correct
+ */
+ if (mac->type == ixgbe_mac_82599_vf) {
+ int i;
+
+ for (i = 0; i < 5; i++) {
+ udelay(100);
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+ if (!(links_reg & IXGBE_LINKS_UP))
+ goto out;
+ }
+ }
+
+ switch (links_reg & IXGBE_LINKS_SPEED_82599) {
+ case IXGBE_LINKS_SPEED_10G_82599:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ case IXGBE_LINKS_SPEED_1G_82599:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case IXGBE_LINKS_SPEED_100_82599:
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ break;
+ }
+
+ /* if we passed all the tests above then the link is up and we no
+ * longer need to check for link
+ */
+ mac->get_link_status = false;
+
+out:
+ *link_up = !mac->get_link_status;
+ return 0;
+}
+
+/**
+ * ixgbevf_set_rlpml_vf - Set the maximum receive packet length
* @hw: pointer to the HW structure
* @max_size: value to assign to max frame size
**/
-void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size)
+static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
{
u32 msgbuf[2];
+ s32 ret_val;
msgbuf[0] = IXGBE_VF_SET_LPE;
msgbuf[1] = max_size;
- ixgbevf_write_msg_read_ack(hw, msgbuf, 2);
+
+ ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
+ if (ret_val)
+ return ret_val;
+ if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
+ (msgbuf[0] & IXGBE_VT_MSGTYPE_NACK))
+ return IXGBE_ERR_MBX;
+
+ return 0;
+}
+
+/**
+ * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length
+ * @hw: pointer to the HW structure
+ * @max_size: value to assign to max frame size
+ * Hyper-V variant.
+ **/
+static s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
+{
+ u32 reg;
+
+ /* If we are on Hyper-V, we implement this functionality
+ * differently.
+ */
+ reg = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0));
+ /* CRC == 4 */
+ reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg);
+
+ return 0;
}
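With no mailbox available, the Hyper-V rlpml path programs VFRXDCTL(0) directly: the receive length limit is the maximum frame plus 4 bytes of CRC, with the length-check enable bit set. A sketch of the value it builds; the RLPML_EN bit position is assumed here for illustration (the real mask comes from the ixgbe register headers):

#define RXDCTL_RLPML_EN	(1u << 15)	/* assumed bit, for illustration */

static unsigned int rxdctl_value(unsigned int reg, unsigned int max_size)
{
	/* max frame + 4 bytes of CRC, with length checking enabled */
	return reg | (max_size + 4) | RXDCTL_RLPML_EN;
}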
/**
- * ixgbevf_negotiate_api_version - Negotiate supported API version
+ * ixgbevf_negotiate_api_version_vf - Negotiate supported API version
* @hw: pointer to the HW structure
* @api: integer containing requested API version
**/
-int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
+static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
{
int err;
u32 msg[3];
@@ -683,11 +836,8 @@ int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
msg[0] = IXGBE_VF_API_NEGOTIATE;
msg[1] = api;
msg[2] = 0;
- err = hw->mbx.ops.write_posted(hw, msg, 3);
-
- if (!err)
- err = hw->mbx.ops.read_posted(hw, msg, 3);
+ err = ixgbevf_write_msg_read_ack(hw, msg, msg, 3);
if (!err) {
msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
@@ -703,6 +853,21 @@ int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
return err;
}
+/**
+ * ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version
+ * @hw: pointer to the HW structure
+ * @api: integer containing requested API version
+ * Hyper-V version - only ixgbe_mbox_api_10 supported.
+ **/
+static int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
+{
+ /* Hyper-V only supports api version ixgbe_mbox_api_10 */
+ if (api != ixgbe_mbox_api_10)
+ return IXGBE_ERR_INVALID_ARGUMENT;
+
+ return 0;
+}
+
int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
unsigned int *default_tc)
{
@@ -721,11 +886,8 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
/* Fetch queue configuration from the PF */
msg[0] = IXGBE_VF_GET_QUEUE;
msg[1] = msg[2] = msg[3] = msg[4] = 0;
- err = hw->mbx.ops.write_posted(hw, msg, 5);
-
- if (!err)
- err = hw->mbx.ops.read_posted(hw, msg, 5);
+ err = ixgbevf_write_msg_read_ack(hw, msg, msg, 5);
if (!err) {
msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
@@ -769,11 +931,30 @@ static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
.stop_adapter = ixgbevf_stop_hw_vf,
.setup_link = ixgbevf_setup_mac_link_vf,
.check_link = ixgbevf_check_mac_link_vf,
+ .negotiate_api_version = ixgbevf_negotiate_api_version_vf,
.set_rar = ixgbevf_set_rar_vf,
.update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
.update_xcast_mode = ixgbevf_update_xcast_mode,
.set_uc_addr = ixgbevf_set_uc_addr_vf,
.set_vfta = ixgbevf_set_vfta_vf,
+ .set_rlpml = ixgbevf_set_rlpml_vf,
+};
+
+static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = {
+ .init_hw = ixgbevf_init_hw_vf,
+ .reset_hw = ixgbevf_hv_reset_hw_vf,
+ .start_hw = ixgbevf_start_hw_vf,
+ .get_mac_addr = ixgbevf_get_mac_addr_vf,
+ .stop_adapter = ixgbevf_stop_hw_vf,
+ .setup_link = ixgbevf_setup_mac_link_vf,
+ .check_link = ixgbevf_hv_check_mac_link_vf,
+ .negotiate_api_version = ixgbevf_hv_negotiate_api_version_vf,
+ .set_rar = ixgbevf_hv_set_rar_vf,
+ .update_mc_addr_list = ixgbevf_hv_update_mc_addr_list_vf,
+ .update_xcast_mode = ixgbevf_hv_update_xcast_mode,
+ .set_uc_addr = ixgbevf_hv_set_uc_addr_vf,
+ .set_vfta = ixgbevf_hv_set_vfta_vf,
+ .set_rlpml = ixgbevf_hv_set_rlpml_vf,
};
const struct ixgbevf_info ixgbevf_82599_vf_info = {
@@ -781,17 +962,42 @@ const struct ixgbevf_info ixgbevf_82599_vf_info = {
.mac_ops = &ixgbevf_mac_ops,
};
+const struct ixgbevf_info ixgbevf_82599_vf_hv_info = {
+ .mac = ixgbe_mac_82599_vf,
+ .mac_ops = &ixgbevf_hv_mac_ops,
+};
+
const struct ixgbevf_info ixgbevf_X540_vf_info = {
.mac = ixgbe_mac_X540_vf,
.mac_ops = &ixgbevf_mac_ops,
};
+const struct ixgbevf_info ixgbevf_X540_vf_hv_info = {
+ .mac = ixgbe_mac_X540_vf,
+ .mac_ops = &ixgbevf_hv_mac_ops,
+};
+
const struct ixgbevf_info ixgbevf_X550_vf_info = {
.mac = ixgbe_mac_X550_vf,
.mac_ops = &ixgbevf_mac_ops,
};
+const struct ixgbevf_info ixgbevf_X550_vf_hv_info = {
+ .mac = ixgbe_mac_X550_vf,
+ .mac_ops = &ixgbevf_hv_mac_ops,
+};
+
const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = {
.mac = ixgbe_mac_X550EM_x_vf,
.mac_ops = &ixgbevf_mac_ops,
};
+
+const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info = {
+ .mac = ixgbe_mac_X550EM_x_vf,
+ .mac_ops = &ixgbevf_hv_mac_ops,
+};
+
+const struct ixgbevf_info ixgbevf_x550em_a_vf_info = {
+ .mac = ixgbe_mac_x550em_a_vf,
+ .mac_ops = &ixgbevf_mac_ops,
+};
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index ef9f7736b4dc..04d8d4ee4f04 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -51,6 +51,7 @@ struct ixgbe_mac_operations {
s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
s32 (*stop_adapter)(struct ixgbe_hw *);
s32 (*get_bus_info)(struct ixgbe_hw *);
+ s32 (*negotiate_api_version)(struct ixgbe_hw *hw, int api);
/* Link */
s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
@@ -63,11 +64,12 @@ struct ixgbe_mac_operations {
s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *);
s32 (*init_rx_addrs)(struct ixgbe_hw *);
s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
- s32 (*update_xcast_mode)(struct ixgbe_hw *, struct net_device *, int);
+ s32 (*update_xcast_mode)(struct ixgbe_hw *, int);
s32 (*enable_mc)(struct ixgbe_hw *);
s32 (*disable_mc)(struct ixgbe_hw *);
s32 (*clear_vfta)(struct ixgbe_hw *);
s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
+ s32 (*set_rlpml)(struct ixgbe_hw *, u16);
};
enum ixgbe_mac_type {
@@ -76,6 +78,7 @@ enum ixgbe_mac_type {
ixgbe_mac_X540_vf,
ixgbe_mac_X550_vf,
ixgbe_mac_X550EM_x_vf,
+ ixgbe_mac_x550em_a_vf,
ixgbe_num_macs
};
@@ -207,8 +210,6 @@ static inline u32 ixgbe_read_reg_array(struct ixgbe_hw *hw, u32 reg,
#define IXGBE_READ_REG_ARRAY(h, r, o) ixgbe_read_reg_array(h, r, o)
-void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
-int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
unsigned int *default_tc);
int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues);
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 3ddf657bc10b..836ebd8ee768 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -222,7 +222,7 @@ jme_clear_ghc_reset(struct jme_adapter *jme)
jwrite32f(jme, JME_GHC, jme->reg_ghc);
}
-static inline void
+static void
jme_reset_mac_processor(struct jme_adapter *jme)
{
static const u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0};
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index d74f5f4e5782..1799fe1415df 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -152,7 +152,7 @@ static inline void korina_abort_dma(struct net_device *dev,
writel(0x10, &ch->dmac);
while (!(readl(&ch->dmas) & DMA_STAT_HALT))
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
writel(0, &ch->dmas);
}
@@ -283,7 +283,7 @@ static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
}
dma_cache_wback((u32) td, sizeof(*td));
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
spin_unlock_irqrestore(&lp->lock, flags);
return NETDEV_TX_OK;
@@ -622,7 +622,7 @@ korina_tx_dma_interrupt(int irq, void *dev_id)
&(lp->tx_dma_regs->dmandptr));
lp->tx_chain_status = desc_empty;
lp->tx_chain_head = lp->tx_chain_tail;
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
}
if (dmas & DMA_STAT_ERR)
printk(KERN_ERR "%s: DMA error\n", dev->name);
@@ -811,7 +811,7 @@ static int korina_init(struct net_device *dev)
/* reset ethernet logic */
writel(0, &lp->eth_regs->ethintfc);
while ((readl(&lp->eth_regs->ethintfc) & ETH_INT_FC_RIP))
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
/* Enable Ethernet Interface */
writel(ETH_INT_FC_EN, &lp->eth_regs->ethintfc);
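The korina hunks, like the sky2, lantiq and pxa168 ones later in this series, are part of a tree-wide conversion: the TX watchdog timestamp moved from net_device into per-queue netdev_queue state, so open-coded dev->trans_start = jiffies writes become netif_trans_update(dev). Approximately what the helper does, as a kernel-context sketch (see <linux/netdevice.h> for the real definition):

#include <linux/netdevice.h>

static inline void netif_trans_update_sketch(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	/* avoid dirtying the cacheline when the stamp is already fresh */
	if (txq->trans_start != jiffies)
		txq->trans_start = jiffies;
}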
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index b630ef1e9646..91e09d68b7e2 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -102,7 +102,6 @@ struct ltq_etop_priv {
struct resource *res;
struct mii_bus *mii_bus;
- struct phy_device *phydev;
struct ltq_etop_chan ch[MAX_DMA_CHAN];
int tx_free[MAX_DMA_CHAN >> 1];
@@ -305,34 +304,16 @@ ltq_etop_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
}
static int
-ltq_etop_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct ltq_etop_priv *priv = netdev_priv(dev);
-
- return phy_ethtool_gset(priv->phydev, cmd);
-}
-
-static int
-ltq_etop_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct ltq_etop_priv *priv = netdev_priv(dev);
-
- return phy_ethtool_sset(priv->phydev, cmd);
-}
-
-static int
ltq_etop_nway_reset(struct net_device *dev)
{
- struct ltq_etop_priv *priv = netdev_priv(dev);
-
- return phy_start_aneg(priv->phydev);
+ return phy_start_aneg(dev->phydev);
}
static const struct ethtool_ops ltq_etop_ethtool_ops = {
.get_drvinfo = ltq_etop_get_drvinfo,
- .get_settings = ltq_etop_get_settings,
- .set_settings = ltq_etop_set_settings,
.nway_reset = ltq_etop_nway_reset,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static int
@@ -401,7 +382,6 @@ ltq_etop_mdio_probe(struct net_device *dev)
| SUPPORTED_TP);
phydev->advertising = phydev->supported;
- priv->phydev = phydev;
phy_attached_info(phydev);
return 0;
@@ -411,7 +391,6 @@ static int
ltq_etop_mdio_init(struct net_device *dev)
{
struct ltq_etop_priv *priv = netdev_priv(dev);
- int i;
int err;
priv->mii_bus = mdiobus_alloc();
@@ -451,7 +430,7 @@ ltq_etop_mdio_cleanup(struct net_device *dev)
{
struct ltq_etop_priv *priv = netdev_priv(dev);
- phy_disconnect(priv->phydev);
+ phy_disconnect(dev->phydev);
mdiobus_unregister(priv->mii_bus);
mdiobus_free(priv->mii_bus);
}
@@ -470,7 +449,7 @@ ltq_etop_open(struct net_device *dev)
ltq_dma_open(&ch->dma);
napi_enable(&ch->napi);
}
- phy_start(priv->phydev);
+ phy_start(dev->phydev);
netif_tx_start_all_queues(dev);
return 0;
}
@@ -482,7 +461,7 @@ ltq_etop_stop(struct net_device *dev)
int i;
netif_tx_stop_all_queues(dev);
- phy_stop(priv->phydev);
+ phy_stop(dev->phydev);
for (i = 0; i < MAX_DMA_CHAN; i++) {
struct ltq_etop_chan *ch = &priv->ch[i];
@@ -519,7 +498,7 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
byte_offset = CPHYSADDR(skb->data) % 16;
ch->skb[ch->dma.desc] = skb;
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
spin_lock_irqsave(&priv->lock, flags);
desc->addr = ((unsigned int) dma_map_single(NULL, skb->data, len,
@@ -557,10 +536,8 @@ ltq_etop_change_mtu(struct net_device *dev, int new_mtu)
static int
ltq_etop_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct ltq_etop_priv *priv = netdev_priv(dev);
-
/* TODO: mii-tool reports "No MII transceiver present!" ?! */
- return phy_mii_ioctl(priv->phydev, rq, cmd);
+ return phy_mii_ioctl(dev->phydev, rq, cmd);
}
static int
@@ -657,7 +634,7 @@ ltq_etop_tx_timeout(struct net_device *dev)
err = ltq_etop_hw_init(dev);
if (err)
goto err_hw;
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
netif_wake_queue(dev);
return;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index a6d26d351dfc..d41c28d00b57 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -244,7 +244,7 @@
/* Various constants */
/* Coalescing */
-#define MVNETA_TXDONE_COAL_PKTS 1
+#define MVNETA_TXDONE_COAL_PKTS 0 /* interrupt per packet */
#define MVNETA_RX_COAL_PKTS 32
#define MVNETA_RX_COAL_USEC 100
@@ -3458,6 +3458,8 @@ static int mvneta_open(struct net_device *dev)
return 0;
err_free_irq:
+ unregister_cpu_notifier(&pp->cpu_notifier);
+ on_each_cpu(mvneta_percpu_disable, pp, true);
free_percpu_irq(pp->dev->irq, pp->ports);
err_cleanup_txqs:
mvneta_cleanup_txqs(pp);
@@ -4116,6 +4118,7 @@ static int mvneta_probe(struct platform_device *pdev)
pp->bm_priv = NULL;
}
}
+ of_node_put(bm_node);
err = mvneta_init(&pdev->dev, pp);
if (err < 0)
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.c b/drivers/net/ethernet/marvell/mvneta_bm.c
index 01fccec632ec..466939f8f0cf 100644
--- a/drivers/net/ethernet/marvell/mvneta_bm.c
+++ b/drivers/net/ethernet/marvell/mvneta_bm.c
@@ -189,6 +189,7 @@ struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
hwbm_pool->construct = mvneta_bm_construct;
hwbm_pool->priv = new_pool;
+ spin_lock_init(&hwbm_pool->lock);
/* Create new pool */
err = mvneta_bm_pool_create(priv, new_pool);
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 868a957f24bb..60227a3452a4 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -699,7 +699,6 @@ struct mvpp2_port {
u16 rx_ring_size;
struct mvpp2_pcpu_stats __percpu *stats;
- struct phy_device *phy_dev;
phy_interface_t phy_interface;
struct device_node *phy_node;
unsigned int link;
@@ -4850,7 +4849,7 @@ static irqreturn_t mvpp2_isr(int irq, void *dev_id)
static void mvpp2_link_event(struct net_device *dev)
{
struct mvpp2_port *port = netdev_priv(dev);
- struct phy_device *phydev = port->phy_dev;
+ struct phy_device *phydev = dev->phydev;
int status_change = 0;
u32 val;
@@ -5416,6 +5415,8 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
/* Set hw internals when starting port */
static void mvpp2_start_dev(struct mvpp2_port *port)
{
+ struct net_device *ndev = port->dev;
+
mvpp2_gmac_max_rx_size_set(port);
mvpp2_txp_max_tx_size_set(port);
@@ -5425,13 +5426,15 @@ static void mvpp2_start_dev(struct mvpp2_port *port)
mvpp2_interrupts_enable(port);
mvpp2_port_enable(port);
- phy_start(port->phy_dev);
+ phy_start(ndev->phydev);
netif_tx_start_all_queues(port->dev);
}
/* Set hw internals when stopping port */
static void mvpp2_stop_dev(struct mvpp2_port *port)
{
+ struct net_device *ndev = port->dev;
+
/* Stop new packets from arriving to RXQs */
mvpp2_ingress_disable(port);
@@ -5447,7 +5450,7 @@ static void mvpp2_stop_dev(struct mvpp2_port *port)
mvpp2_egress_disable(port);
mvpp2_port_disable(port);
- phy_stop(port->phy_dev);
+ phy_stop(ndev->phydev);
}
/* Return positive if MTU is valid */
@@ -5535,7 +5538,6 @@ static int mvpp2_phy_connect(struct mvpp2_port *port)
phy_dev->supported &= PHY_GBIT_FEATURES;
phy_dev->advertising = phy_dev->supported;
- port->phy_dev = phy_dev;
port->link = 0;
port->duplex = 0;
port->speed = 0;
@@ -5545,8 +5547,9 @@ static int mvpp2_phy_connect(struct mvpp2_port *port)
static void mvpp2_phy_disconnect(struct mvpp2_port *port)
{
- phy_disconnect(port->phy_dev);
- port->phy_dev = NULL;
+ struct net_device *ndev = port->dev;
+
+ phy_disconnect(ndev->phydev);
}
static int mvpp2_open(struct net_device *dev)
@@ -5796,13 +5799,12 @@ mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- struct mvpp2_port *port = netdev_priv(dev);
int ret;
- if (!port->phy_dev)
+ if (!dev->phydev)
return -ENOTSUPP;
- ret = phy_mii_ioctl(port->phy_dev, ifr, cmd);
+ ret = phy_mii_ioctl(dev->phydev, ifr, cmd);
if (!ret)
mvpp2_link_event(dev);
@@ -5811,28 +5813,6 @@ static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
/* Ethtool methods */
-/* Get settings (phy address, speed) for ethtools */
-static int mvpp2_ethtool_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
-{
- struct mvpp2_port *port = netdev_priv(dev);
-
- if (!port->phy_dev)
- return -ENODEV;
- return phy_ethtool_gset(port->phy_dev, cmd);
-}
-
-/* Set settings (phy address, speed) for ethtools */
-static int mvpp2_ethtool_set_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
-{
- struct mvpp2_port *port = netdev_priv(dev);
-
- if (!port->phy_dev)
- return -ENODEV;
- return phy_ethtool_sset(port->phy_dev, cmd);
-}
-
/* Set interrupt coalescing for ethtools */
static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
struct ethtool_coalesce *c)
@@ -5967,13 +5947,13 @@ static const struct net_device_ops mvpp2_netdev_ops = {
static const struct ethtool_ops mvpp2_eth_tool_ops = {
.get_link = ethtool_op_get_link,
- .get_settings = mvpp2_ethtool_get_settings,
- .set_settings = mvpp2_ethtool_set_settings,
.set_coalesce = mvpp2_ethtool_set_coalesce,
.get_coalesce = mvpp2_ethtool_get_coalesce,
.get_drvinfo = mvpp2_ethtool_get_drvinfo,
.get_ringparam = mvpp2_ethtool_get_ringparam,
.set_ringparam = mvpp2_ethtool_set_ringparam,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
/* Driver initialization */
@@ -6254,6 +6234,7 @@ err_free_stats:
err_free_irq:
irq_dispose_mapping(port->irq);
err_free_netdev:
+ of_node_put(phy_node);
free_netdev(dev);
return err;
}
@@ -6264,6 +6245,7 @@ static void mvpp2_port_remove(struct mvpp2_port *port)
int i;
unregister_netdev(port->dev);
+ of_node_put(port->phy_node);
free_percpu(port->pcpu);
free_percpu(port->stats);
for (i = 0; i < txq_number; i++)
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index c442f6ad15ff..5d5000c8edf1 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -247,7 +247,6 @@ struct pxa168_eth_private {
*/
struct timer_list timeout;
struct mii_bus *smi_bus;
- struct phy_device *phy;
/* clock */
struct clk *clk;
@@ -275,8 +274,8 @@ enum hash_table_entry {
HASH_ENTRY_RECEIVE_DISCARD_BIT = 2
};
-static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd);
-static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd);
+static int pxa168_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd);
static int pxa168_init_hw(struct pxa168_eth_private *pep);
static int pxa168_init_phy(struct net_device *dev);
static void eth_port_reset(struct net_device *dev);
@@ -286,12 +285,12 @@ static int pxa168_eth_stop(struct net_device *dev);
static inline u32 rdl(struct pxa168_eth_private *pep, int offset)
{
- return readl(pep->base + offset);
+ return readl_relaxed(pep->base + offset);
}
static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data)
{
- writel(data, pep->base + offset);
+ writel_relaxed(data, pep->base + offset);
}
static void abort_dma(struct pxa168_eth_private *pep)
@@ -342,9 +341,9 @@ static void rxq_refill(struct net_device *dev)
pep->rx_skb[used_rx_desc] = skb;
/* Return the descriptor to DMA ownership */
- wmb();
+ dma_wmb();
p_used_rx_desc->cmd_sts = BUF_OWNED_BY_DMA | RX_EN_INT;
- wmb();
+ dma_wmb();
/* Move the used descriptor pointer to the next descriptor */
pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size;
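The wmb()/rmb() to dma_wmb()/dma_rmb() conversions in this file all guard one pattern: descriptor fields must be visible to the device before ownership is flipped, and ownership must be checked before fields are read back. The DMA-scoped barriers are sufficient, and cheaper, because they only order accesses to coherent memory. A kernel-context sketch of the producer side, with stand-in types:

#include <linux/types.h>
#include <asm/barrier.h>	/* dma_wmb() */

struct rx_desc_s {
	u32 buf_ptr;
	u32 cmd_sts;		/* the BUF_OWNED_BY_DMA bit lives here */
};

static void hand_desc_to_hw(struct rx_desc_s *d, u32 buf, u32 owned)
{
	d->buf_ptr = buf;	/* 1. fill everything the device will read */
	dma_wmb();		/* 2. order payload before the ownership flip */
	d->cmd_sts = owned;	/* 3. device may now consume the descriptor */
}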
@@ -644,7 +643,7 @@ static void eth_port_start(struct net_device *dev)
struct pxa168_eth_private *pep = netdev_priv(dev);
int tx_curr_desc, rx_curr_desc;
- phy_start(pep->phy);
+ phy_start(dev->phydev);
/* Assignment of Tx CTRP of given queue */
tx_curr_desc = pep->tx_curr_desc_q;
@@ -700,7 +699,7 @@ static void eth_port_reset(struct net_device *dev)
val &= ~PCR_EN;
wrl(pep, PORT_CONFIG, val);
- phy_stop(pep->phy);
+ phy_stop(dev->phydev);
}
/*
@@ -794,7 +793,7 @@ static int rxq_process(struct net_device *dev, int budget)
rx_used_desc = pep->rx_used_desc_q;
rx_desc = &pep->p_rx_desc_area[rx_curr_desc];
cmd_sts = rx_desc->cmd_sts;
- rmb();
+ dma_rmb();
if (cmd_sts & (BUF_OWNED_BY_DMA))
break;
skb = pep->rx_skb[rx_curr_desc];
@@ -943,7 +942,7 @@ static int set_port_config_ext(struct pxa168_eth_private *pep)
static void pxa168_eth_adjust_link(struct net_device *dev)
{
struct pxa168_eth_private *pep = netdev_priv(dev);
- struct phy_device *phy = pep->phy;
+ struct phy_device *phy = dev->phydev;
u32 cfg, cfg_o = rdl(pep, PORT_CONFIG);
u32 cfgext, cfgext_o = rdl(pep, PORT_CONFIG_EXT);
@@ -972,37 +971,37 @@ static void pxa168_eth_adjust_link(struct net_device *dev)
static int pxa168_init_phy(struct net_device *dev)
{
struct pxa168_eth_private *pep = netdev_priv(dev);
- struct ethtool_cmd cmd;
+ struct ethtool_link_ksettings cmd;
+ struct phy_device *phy = NULL;
int err;
- if (pep->phy)
+ if (dev->phydev)
return 0;
- pep->phy = mdiobus_scan(pep->smi_bus, pep->phy_addr);
- if (IS_ERR(pep->phy))
- return PTR_ERR(pep->phy);
- if (!pep->phy)
- return -ENODEV;
+ phy = mdiobus_scan(pep->smi_bus, pep->phy_addr);
+ if (IS_ERR(phy))
+ return PTR_ERR(phy);
- err = phy_connect_direct(dev, pep->phy, pxa168_eth_adjust_link,
+ err = phy_connect_direct(dev, phy, pxa168_eth_adjust_link,
pep->phy_intf);
if (err)
return err;
- err = pxa168_get_settings(dev, &cmd);
+ err = pxa168_get_link_ksettings(dev, &cmd);
if (err)
return err;
- cmd.phy_address = pep->phy_addr;
- cmd.speed = pep->phy_speed;
- cmd.duplex = pep->phy_duplex;
- cmd.advertising = PHY_BASIC_FEATURES;
- cmd.autoneg = AUTONEG_ENABLE;
+ cmd.base.phy_address = pep->phy_addr;
+ cmd.base.speed = pep->phy_speed;
+ cmd.base.duplex = pep->phy_duplex;
+ ethtool_convert_legacy_u32_to_link_mode(cmd.link_modes.advertising,
+ PHY_BASIC_FEATURES);
+ cmd.base.autoneg = AUTONEG_ENABLE;
- if (cmd.speed != 0)
- cmd.autoneg = AUTONEG_DISABLE;
+ if (cmd.base.speed != 0)
+ cmd.base.autoneg = AUTONEG_DISABLE;
- return pxa168_set_settings(dev, &cmd);
+ return phy_ethtool_set_link_ksettings(dev, &cmd);
}
static int pxa168_init_hw(struct pxa168_eth_private *pep)
@@ -1289,7 +1288,7 @@ static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
- wmb();
+ dma_wmb();
desc->cmd_sts = BUF_OWNED_BY_DMA | TX_GEN_CRC | TX_FIRST_DESC |
TX_ZERO_PADDING | TX_LAST_DESC | TX_EN_INT;
wmb();
@@ -1297,7 +1296,7 @@ static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
stats->tx_bytes += length;
stats->tx_packets++;
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
/* We handled the current skb, but now we are out of space.*/
netif_stop_queue(dev);
@@ -1368,32 +1367,24 @@ static int pxa168_smi_write(struct mii_bus *bus, int phy_addr, int regnum,
static int pxa168_eth_do_ioctl(struct net_device *dev, struct ifreq *ifr,
int cmd)
{
- struct pxa168_eth_private *pep = netdev_priv(dev);
- if (pep->phy != NULL)
- return phy_mii_ioctl(pep->phy, ifr, cmd);
+ if (dev->phydev != NULL)
+ return phy_mii_ioctl(dev->phydev, ifr, cmd);
return -EOPNOTSUPP;
}
-static int pxa168_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int pxa168_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
- struct pxa168_eth_private *pep = netdev_priv(dev);
int err;
- err = phy_read_status(pep->phy);
+ err = phy_read_status(dev->phydev);
if (err == 0)
- err = phy_ethtool_gset(pep->phy, cmd);
+ err = phy_ethtool_ksettings_get(dev->phydev, cmd);
return err;
}
-static int pxa168_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct pxa168_eth_private *pep = netdev_priv(dev);
-
- return phy_ethtool_sset(pep->phy, cmd);
-}
-
static void pxa168_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
@@ -1404,11 +1395,11 @@ static void pxa168_get_drvinfo(struct net_device *dev,
}
static const struct ethtool_ops pxa168_ethtool_ops = {
- .get_settings = pxa168_get_settings,
- .set_settings = pxa168_set_settings,
.get_drvinfo = pxa168_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = pxa168_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static const struct net_device_ops pxa168_eth_netdev_ops = {
@@ -1515,6 +1506,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
}
of_property_read_u32(np, "reg", &pep->phy_addr);
pep->phy_intf = of_get_phy_mode(pdev->dev.of_node);
+ of_node_put(np);
}
/* Hardware supports only 3 ports */
@@ -1571,8 +1563,8 @@ static int pxa168_eth_remove(struct platform_device *pdev)
pep->htpr, pep->htpr_dma);
pep->htpr = NULL;
}
- if (pep->phy)
- phy_disconnect(pep->phy);
+ if (dev->phydev)
+ phy_disconnect(dev->phydev);
if (pep->clk) {
clk_disable_unprepare(pep->clk);
}
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index ec0a22119e09..467138b423d3 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -2418,7 +2418,7 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
sky2_write32(hw, B0_IMSK, 0);
sky2_read32(hw, B0_IMSK);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
napi_disable(&hw->napi);
netif_tx_disable(dev);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index e0b68afea56e..3743af8f1ded 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -50,6 +50,10 @@ static const struct mtk_ethtool_stats {
MTK_ETHTOOL_STAT(rx_flow_control_packets),
};
+static const char * const mtk_clks_source_name[] = {
+ "ethif", "esw", "gp1", "gp2"
+};
+
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
__raw_writel(val, eth->base + reg);
@@ -76,8 +80,8 @@ static int mtk_mdio_busy_wait(struct mtk_eth *eth)
return -1;
}
-u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
- u32 phy_register, u32 write_data)
+static u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
+ u32 phy_register, u32 write_data)
{
if (mtk_mdio_busy_wait(eth))
return -1;
@@ -95,7 +99,7 @@ u32 _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr,
return 0;
}
-u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
+static u32 _mtk_mdio_read(struct mtk_eth *eth, int phy_addr, int phy_reg)
{
u32 d;
@@ -133,6 +137,8 @@ static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
static void mtk_phy_link_adjust(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
+ u16 lcl_adv = 0, rmt_adv = 0;
+ u8 flowctrl;
u32 mcr = MAC_MCR_MAX_RX_1536 | MAC_MCR_IPG_CFG |
MAC_MCR_FORCE_MODE | MAC_MCR_TX_EN |
MAC_MCR_RX_EN | MAC_MCR_BACKOFF_EN |
@@ -150,11 +156,30 @@ static void mtk_phy_link_adjust(struct net_device *dev)
if (mac->phy_dev->link)
mcr |= MAC_MCR_FORCE_LINK;
- if (mac->phy_dev->duplex)
+ if (mac->phy_dev->duplex) {
mcr |= MAC_MCR_FORCE_DPX;
- if (mac->phy_dev->pause)
- mcr |= MAC_MCR_FORCE_RX_FC | MAC_MCR_FORCE_TX_FC;
+ if (mac->phy_dev->pause)
+ rmt_adv = LPA_PAUSE_CAP;
+ if (mac->phy_dev->asym_pause)
+ rmt_adv |= LPA_PAUSE_ASYM;
+
+ if (mac->phy_dev->advertising & ADVERTISED_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_CAP;
+ if (mac->phy_dev->advertising & ADVERTISED_Asym_Pause)
+ lcl_adv |= ADVERTISE_PAUSE_ASYM;
+
+ flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
+
+ if (flowctrl & FLOW_CTRL_TX)
+ mcr |= MAC_MCR_FORCE_TX_FC;
+ if (flowctrl & FLOW_CTRL_RX)
+ mcr |= MAC_MCR_FORCE_RX_FC;
+
+ netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n",
+ flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
+ flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
+ }
mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
@@ -208,22 +233,32 @@ static int mtk_phy_connect(struct mtk_mac *mac)
u32 val, ge_mode;
np = of_parse_phandle(mac->of_node, "phy-handle", 0);
+ if (!np && of_phy_is_fixed_link(mac->of_node))
+ if (!of_phy_register_fixed_link(mac->of_node))
+ np = of_node_get(mac->of_node);
if (!np)
return -ENODEV;
switch (of_get_phy_mode(np)) {
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII:
ge_mode = 0;
break;
case PHY_INTERFACE_MODE_MII:
ge_mode = 1;
break;
- case PHY_INTERFACE_MODE_RMII:
+ case PHY_INTERFACE_MODE_REVMII:
ge_mode = 2;
break;
+ case PHY_INTERFACE_MODE_RMII:
+ if (!mac->id)
+ goto err_phy;
+ ge_mode = 3;
+ break;
default:
- dev_err(eth->dev, "invalid phy_mode\n");
- return -1;
+ goto err_phy;
}
/* put the gmac into the right mode */
@@ -236,18 +271,31 @@ static int mtk_phy_connect(struct mtk_mac *mac)
mac->phy_dev->autoneg = AUTONEG_ENABLE;
mac->phy_dev->speed = 0;
mac->phy_dev->duplex = 0;
- mac->phy_dev->supported &= PHY_BASIC_FEATURES;
+
+ if (of_phy_is_fixed_link(mac->of_node))
+ mac->phy_dev->supported |=
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+
+ mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
+ SUPPORTED_Asym_Pause;
mac->phy_dev->advertising = mac->phy_dev->supported |
ADVERTISED_Autoneg;
phy_start_aneg(mac->phy_dev);
+ of_node_put(np);
+
return 0;
+
+err_phy:
+ of_node_put(np);
+ dev_err(eth->dev, "invalid phy_mode\n");
+ return -EINVAL;
}
static int mtk_mdio_init(struct mtk_eth *eth)
{
struct device_node *mii_np;
- int err;
+ int ret;
mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
if (!mii_np) {
@@ -256,13 +304,13 @@ static int mtk_mdio_init(struct mtk_eth *eth)
}
if (!of_device_is_available(mii_np)) {
- err = 0;
+ ret = -ENODEV;
goto err_put_node;
}
- eth->mii_bus = mdiobus_alloc();
+ eth->mii_bus = devm_mdiobus_alloc(eth->dev);
if (!eth->mii_bus) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto err_put_node;
}
@@ -273,19 +321,11 @@ static int mtk_mdio_init(struct mtk_eth *eth)
eth->mii_bus->parent = eth->dev;
snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name);
- err = of_mdiobus_register(eth->mii_bus, mii_np);
- if (err)
- goto err_free_bus;
-
- return 0;
-
-err_free_bus:
- kfree(eth->mii_bus);
+ ret = of_mdiobus_register(eth->mii_bus, mii_np);
err_put_node:
of_node_put(mii_np);
- eth->mii_bus = NULL;
- return err;
+ return ret;
}
static void mtk_mdio_cleanup(struct mtk_eth *eth)
@@ -294,28 +334,28 @@ static void mtk_mdio_cleanup(struct mtk_eth *eth)
return;
mdiobus_unregister(eth->mii_bus);
- of_node_put(eth->mii_bus->dev.of_node);
- kfree(eth->mii_bus);
}
static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
{
+ unsigned long flags;
u32 val;
+ spin_lock_irqsave(&eth->irq_lock, flags);
val = mtk_r32(eth, MTK_QDMA_INT_MASK);
mtk_w32(eth, val & ~mask, MTK_QDMA_INT_MASK);
- /* flush write */
- mtk_r32(eth, MTK_QDMA_INT_MASK);
+ spin_unlock_irqrestore(&eth->irq_lock, flags);
}
static inline void mtk_irq_enable(struct mtk_eth *eth, u32 mask)
{
+ unsigned long flags;
u32 val;
+ spin_lock_irqsave(&eth->irq_lock, flags);
val = mtk_r32(eth, MTK_QDMA_INT_MASK);
mtk_w32(eth, val | mask, MTK_QDMA_INT_MASK);
- /* flush write */
- mtk_r32(eth, MTK_QDMA_INT_MASK);
+ spin_unlock_irqrestore(&eth->irq_lock, flags);
}
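The irq_lock added above closes a real lost-update race: once TX and RX are split into separate NAPI instances later in this patch, two CPUs can interleave the unlocked read-modify-write of MTK_QDMA_INT_MASK and undo each other's changes. An illustrative interleaving, with r32/w32 standing for the register accessors:

/*   CPU0: mask TX_DONE             CPU1: unmask RX_DONE
 *   val = r32(INT_MASK);
 *                                  val = r32(INT_MASK);
 *   w32(val & ~TX, INT_MASK);
 *                                  w32(val | RX, INT_MASK);  TX re-enabled
 *
 * Serializing each read-modify-write under eth->irq_lock, as the hunks
 * above do, makes the two updates atomic with respect to each other.
 */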
static int mtk_set_mac_address(struct net_device *dev, void *p)
@@ -453,20 +493,23 @@ static inline void mtk_rx_get_desc(struct mtk_rx_dma *rxd,
/* the qdma core needs scratch memory to be setup */
static int mtk_init_fq_dma(struct mtk_eth *eth)
{
- dma_addr_t phy_ring_head, phy_ring_tail;
+ dma_addr_t phy_ring_tail;
int cnt = MTK_DMA_SIZE;
dma_addr_t dma_addr;
int i;
eth->scratch_ring = dma_alloc_coherent(eth->dev,
cnt * sizeof(struct mtk_tx_dma),
- &phy_ring_head,
+ &eth->phy_scratch_ring,
GFP_ATOMIC | __GFP_ZERO);
if (unlikely(!eth->scratch_ring))
return -ENOMEM;
eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE,
GFP_KERNEL);
+ if (unlikely(!eth->scratch_head))
+ return -ENOMEM;
+
dma_addr = dma_map_single(eth->dev,
eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
DMA_FROM_DEVICE);
@@ -474,19 +517,19 @@ static int mtk_init_fq_dma(struct mtk_eth *eth)
return -ENOMEM;
memset(eth->scratch_ring, 0x0, sizeof(struct mtk_tx_dma) * cnt);
- phy_ring_tail = phy_ring_head +
+ phy_ring_tail = eth->phy_scratch_ring +
(sizeof(struct mtk_tx_dma) * (cnt - 1));
for (i = 0; i < cnt; i++) {
eth->scratch_ring[i].txd1 =
(dma_addr + (i * MTK_QDMA_PAGE_SIZE));
if (i < cnt - 1)
- eth->scratch_ring[i].txd2 = (phy_ring_head +
+ eth->scratch_ring[i].txd2 = (eth->phy_scratch_ring +
((i + 1) * sizeof(struct mtk_tx_dma)));
eth->scratch_ring[i].txd3 = TX_DMA_SDL(MTK_QDMA_PAGE_SIZE);
}
- mtk_w32(eth, phy_ring_head, MTK_QDMA_FQ_HEAD);
+ mtk_w32(eth, eth->phy_scratch_ring, MTK_QDMA_FQ_HEAD);
mtk_w32(eth, phy_ring_tail, MTK_QDMA_FQ_TAIL);
mtk_w32(eth, (cnt << 16) | cnt, MTK_QDMA_FQ_CNT);
mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, MTK_QDMA_FQ_BLEN);
@@ -509,15 +552,15 @@ static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
return &ring->buf[idx];
}
-static void mtk_tx_unmap(struct device *dev, struct mtk_tx_buf *tx_buf)
+static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
{
if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
- dma_unmap_single(dev,
+ dma_unmap_single(eth->dev,
dma_unmap_addr(tx_buf, dma_addr0),
dma_unmap_len(tx_buf, dma_len0),
DMA_TO_DEVICE);
} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
- dma_unmap_page(dev,
+ dma_unmap_page(eth->dev,
dma_unmap_addr(tx_buf, dma_addr0),
dma_unmap_len(tx_buf, dma_len0),
DMA_TO_DEVICE);
@@ -536,18 +579,18 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
struct mtk_eth *eth = mac->hw;
struct mtk_tx_dma *itxd, *txd;
struct mtk_tx_buf *tx_buf;
- unsigned long flags;
dma_addr_t mapped_addr;
unsigned int nr_frags;
int i, n_desc = 1;
- u32 txd4 = 0;
+ u32 txd4 = 0, fport;
itxd = ring->next_free;
if (itxd == ring->last_free)
return -ENOMEM;
/* set the forward port */
- txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT;
+ fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
+ txd4 |= fport;
tx_buf = mtk_desc_to_tx_buf(ring, itxd);
memset(tx_buf, 0, sizeof(*tx_buf));
@@ -563,16 +606,11 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
if (skb_vlan_tag_present(skb))
txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
- mapped_addr = dma_map_single(&dev->dev, skb->data,
+ mapped_addr = dma_map_single(eth->dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
+ if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
return -ENOMEM;
- /* normally we can rely on the stack not calling this more than once,
- * however we have 2 queues running ont he same ring so we need to lock
- * the ring access
- */
- spin_lock_irqsave(&eth->page_lock, flags);
WRITE_ONCE(itxd->txd1, mapped_addr);
tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
@@ -596,10 +634,10 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
n_desc++;
frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
- mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
+ mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
frag_map_size,
DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
+ if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
goto err_dma;
if (i == nr_frags - 1 &&
@@ -609,9 +647,8 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
WRITE_ONCE(txd->txd1, mapped_addr);
WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
TX_DMA_PLEN0(frag_map_size) |
- last_frag * TX_DMA_LS0) |
- mac->id);
- WRITE_ONCE(txd->txd4, 0);
+ last_frag * TX_DMA_LS0));
+ WRITE_ONCE(txd->txd4, fport);
tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
tx_buf = mtk_desc_to_tx_buf(ring, txd);
@@ -632,8 +669,6 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
(!nr_frags * TX_DMA_LS0)));
- spin_unlock_irqrestore(&eth->page_lock, flags);
-
netdev_sent_queue(dev, skb->len);
skb_tx_timestamp(skb);
@@ -652,17 +687,15 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
err_dma:
do {
- tx_buf = mtk_desc_to_tx_buf(ring, txd);
+ tx_buf = mtk_desc_to_tx_buf(ring, itxd);
/* unmap dma */
- mtk_tx_unmap(&dev->dev, tx_buf);
+ mtk_tx_unmap(eth, tx_buf);
itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
} while (itxd != txd);
- spin_unlock_irqrestore(&eth->page_lock, flags);
-
return -ENOMEM;
}
@@ -681,7 +714,43 @@ static inline int mtk_cal_txd_req(struct sk_buff *skb)
nfrags += skb_shinfo(skb)->nr_frags;
}
- return DIV_ROUND_UP(nfrags, 2);
+ return nfrags;
+}
+
+static int mtk_queue_stopped(struct mtk_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->netdev[i])
+ continue;
+ if (netif_queue_stopped(eth->netdev[i]))
+ return 1;
+ }
+
+ return 0;
+}
+
+static void mtk_wake_queue(struct mtk_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->netdev[i])
+ continue;
+ netif_wake_queue(eth->netdev[i]);
+ }
+}
+
+static void mtk_stop_queue(struct mtk_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->netdev[i])
+ continue;
+ netif_stop_queue(eth->netdev[i]);
+ }
}
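These helpers exist because both MACs transmit through the one shared QDMA ring: stopping or waking only the netdev that happened to hit the threshold would let the other keep consuming, or stay starved on, the same descriptors. A sketch of how the completion path pairs with them, using the driver's own symbols:

static void mtk_tx_maybe_wake(struct mtk_eth *eth, struct mtk_tx_ring *ring)
{
	/* wake every netdev once enough descriptors were reclaimed;
	 * they all share this one ring
	 */
	if (mtk_queue_stopped(eth) &&
	    atomic_read(&ring->free_count) > ring->thresh)
		mtk_wake_queue(eth);
}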
static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -690,14 +759,22 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct mtk_eth *eth = mac->hw;
struct mtk_tx_ring *ring = &eth->tx_ring;
struct net_device_stats *stats = &dev->stats;
+ unsigned long flags;
bool gso = false;
int tx_num;
+ /* normally we can rely on the stack not calling this more than once,
+ * however we have 2 queues running on the same ring so we need to lock
+ * the ring access
+ */
+ spin_lock_irqsave(&eth->page_lock, flags);
+
tx_num = mtk_cal_txd_req(skb);
if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
- netif_stop_queue(dev);
+ mtk_stop_queue(eth);
netif_err(eth, tx_queued, dev,
"Tx Ring full when queue awake!\n");
+ spin_unlock_irqrestore(&eth->page_lock, flags);
return NETDEV_TX_BUSY;
}
@@ -719,23 +796,22 @@ static int mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
goto drop;
- if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) {
- netif_stop_queue(dev);
- if (unlikely(atomic_read(&ring->free_count) >
- ring->thresh))
- netif_wake_queue(dev);
- }
+ if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
+ mtk_stop_queue(eth);
+
+ spin_unlock_irqrestore(&eth->page_lock, flags);
return NETDEV_TX_OK;
drop:
+ spin_unlock_irqrestore(&eth->page_lock, flags);
stats->tx_dropped++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
static int mtk_poll_rx(struct napi_struct *napi, int budget,
- struct mtk_eth *eth, u32 rx_intr)
+ struct mtk_eth *eth)
{
struct mtk_rx_ring *ring = &eth->rx_ring;
int idx = ring->calc_idx;
@@ -771,24 +847,26 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
netdev->stats.rx_dropped++;
goto release_desc;
}
- dma_addr = dma_map_single(&eth->netdev[mac]->dev,
+ dma_addr = dma_map_single(eth->dev,
new_data + NET_SKB_PAD,
ring->buf_size,
DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
+ if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
skb_free_frag(new_data);
+ netdev->stats.rx_dropped++;
goto release_desc;
}
/* receive data */
skb = build_skb(data, ring->frag_size);
if (unlikely(!skb)) {
- put_page(virt_to_head_page(new_data));
+ skb_free_frag(new_data);
+ netdev->stats.rx_dropped++;
goto release_desc;
}
skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
- dma_unmap_single(&netdev->dev, trxd.rxd1,
+ dma_unmap_single(eth->dev, trxd.rxd1,
ring->buf_size, DMA_FROM_DEVICE);
pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
skb->dev = netdev;
@@ -821,22 +899,22 @@ release_desc:
}
if (done < budget)
- mtk_w32(eth, rx_intr, MTK_QMTK_INT_STATUS);
+ mtk_w32(eth, MTK_RX_DONE_INT, MTK_QMTK_INT_STATUS);
return done;
}
-static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
+static int mtk_poll_tx(struct mtk_eth *eth, int budget)
{
struct mtk_tx_ring *ring = &eth->tx_ring;
struct mtk_tx_dma *desc;
struct sk_buff *skb;
struct mtk_tx_buf *tx_buf;
- int total = 0, done[MTK_MAX_DEVS];
+ unsigned int done[MTK_MAX_DEVS];
unsigned int bytes[MTK_MAX_DEVS];
u32 cpu, dma;
static int condition;
- int i;
+ int total = 0, i;
memset(done, 0, sizeof(done));
memset(bytes, 0, sizeof(bytes));
@@ -870,9 +948,8 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
done[mac]++;
budget--;
}
- mtk_tx_unmap(eth->dev, tx_buf);
+ mtk_tx_unmap(eth, tx_buf);
- ring->last_free->txd2 = next_cpu;
ring->last_free = desc;
atomic_inc(&ring->free_count);
@@ -888,69 +965,82 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget, bool *tx_again)
total += done[i];
}
- /* read hw index again make sure no new tx packet */
- if (cpu != dma || cpu != mtk_r32(eth, MTK_QTX_DRX_PTR))
- *tx_again = true;
- else
- mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
+ if (mtk_queue_stopped(eth) &&
+ (atomic_read(&ring->free_count) > ring->thresh))
+ mtk_wake_queue(eth);
- if (!total)
- return 0;
+ return total;
+}
- for (i = 0; i < MTK_MAC_COUNT; i++) {
- if (!eth->netdev[i] ||
- unlikely(!netif_queue_stopped(eth->netdev[i])))
- continue;
- if (atomic_read(&ring->free_count) > ring->thresh)
- netif_wake_queue(eth->netdev[i]);
- }
+static void mtk_handle_status_irq(struct mtk_eth *eth)
+{
+ u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
- return total;
+ if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
+ mtk_stats_update(eth);
+ mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
+ MTK_INT_STATUS2);
+ }
}
-static int mtk_poll(struct napi_struct *napi, int budget)
+static int mtk_napi_tx(struct napi_struct *napi, int budget)
{
- struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
- u32 status, status2, mask, tx_intr, rx_intr, status_intr;
- int tx_done, rx_done;
- bool tx_again = false;
+ struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
+ u32 status, mask;
+ int tx_done = 0;
+
+ mtk_handle_status_irq(eth);
+ mtk_w32(eth, MTK_TX_DONE_INT, MTK_QMTK_INT_STATUS);
+ tx_done = mtk_poll_tx(eth, budget);
+
+ if (unlikely(netif_msg_intr(eth))) {
+ status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
+ mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
+ dev_info(eth->dev,
+ "done tx %d, intr 0x%08x/0x%x\n",
+ tx_done, status, mask);
+ }
+
+ if (tx_done == budget)
+ return budget;
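+ /* Re-check the hardware status: if more TX completions arrived
+ * while polling, stay scheduled by returning the full budget.
+ */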
status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
- status2 = mtk_r32(eth, MTK_INT_STATUS2);
- tx_intr = MTK_TX_DONE_INT;
- rx_intr = MTK_RX_DONE_INT;
- status_intr = (MTK_GDM1_AF | MTK_GDM2_AF);
- tx_done = 0;
- rx_done = 0;
- tx_again = 0;
+ if (status & MTK_TX_DONE_INT)
+ return budget;
- if (status & tx_intr)
- tx_done = mtk_poll_tx(eth, budget, &tx_again);
+ napi_complete(napi);
+ mtk_irq_enable(eth, MTK_TX_DONE_INT);
- if (status & rx_intr)
- rx_done = mtk_poll_rx(napi, budget, eth, rx_intr);
+ return tx_done;
+}
- if (unlikely(status2 & status_intr)) {
- mtk_stats_update(eth);
- mtk_w32(eth, status_intr, MTK_INT_STATUS2);
- }
+static int mtk_napi_rx(struct napi_struct *napi, int budget)
+{
+ struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
+ u32 status, mask;
+ int rx_done = 0;
+
+ mtk_handle_status_irq(eth);
+ mtk_w32(eth, MTK_RX_DONE_INT, MTK_QMTK_INT_STATUS);
+ rx_done = mtk_poll_rx(napi, budget, eth);
if (unlikely(netif_msg_intr(eth))) {
+ status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
mask = mtk_r32(eth, MTK_QDMA_INT_MASK);
- netdev_info(eth->netdev[0],
- "done tx %d, rx %d, intr 0x%08x/0x%x\n",
- tx_done, rx_done, status, mask);
+ dev_info(eth->dev,
+ "done rx %d, intr 0x%08x/0x%x\n",
+ rx_done, status, mask);
}
- if (tx_again || rx_done == budget)
+ if (rx_done == budget)
return budget;
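+ /* As in mtk_napi_tx(), only complete NAPI and re-enable the RX
+ * interrupt once no further RX-done work is pending.
+ */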
status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
- if (status & (tx_intr | rx_intr))
+ if (status & MTK_RX_DONE_INT)
return budget;
napi_complete(napi);
- mtk_irq_enable(eth, tx_intr | rx_intr);
+ mtk_irq_enable(eth, MTK_RX_DONE_INT);
return rx_done;
}
@@ -983,9 +1073,8 @@ static int mtk_tx_alloc(struct mtk_eth *eth)
atomic_set(&ring->free_count, MTK_DMA_SIZE - 2);
ring->next_free = &ring->dma[0];
- ring->last_free = &ring->dma[MTK_DMA_SIZE - 2];
- ring->thresh = max((unsigned long)MTK_DMA_SIZE >> 2,
- MAX_SKB_FRAGS);
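+ /* Keep MAX_SKB_FRAGS descriptors in reserve so the queue is
+ * stopped before a fully fragmented skb could overrun the ring.
+ */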
+ ring->last_free = &ring->dma[MTK_DMA_SIZE - 1];
+ ring->thresh = MAX_SKB_FRAGS;
/* make sure that all changes to the dma ring are flushed before we
* continue
@@ -1014,7 +1103,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
if (ring->buf) {
for (i = 0; i < MTK_DMA_SIZE; i++)
- mtk_tx_unmap(eth->dev, &ring->buf[i]);
+ mtk_tx_unmap(eth, &ring->buf[i]);
kfree(ring->buf);
ring->buf = NULL;
}
@@ -1163,6 +1252,14 @@ static void mtk_dma_free(struct mtk_eth *eth)
for (i = 0; i < MTK_MAC_COUNT; i++)
if (eth->netdev[i])
netdev_reset_queue(eth->netdev[i]);
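+ /* Also release the coherent DMA block backing the HW-managed
+ * scratch TX ring before tearing down the normal rings.
+ */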
+ if (eth->scratch_ring) {
+ dma_free_coherent(eth->dev,
+ MTK_DMA_SIZE * sizeof(struct mtk_tx_dma),
+ eth->scratch_ring,
+ eth->phy_scratch_ring);
+ eth->scratch_ring = NULL;
+ eth->phy_scratch_ring = 0;
+ }
mtk_tx_clean(eth);
mtk_rx_clean(eth);
kfree(eth->scratch_head);
@@ -1176,25 +1273,29 @@ static void mtk_tx_timeout(struct net_device *dev)
eth->netdev[mac->id]->stats.tx_errors++;
netif_err(eth, tx_err, dev,
"transmit timed out\n");
- schedule_work(&mac->pending_work);
+ schedule_work(&eth->pending_work);
}
-static irqreturn_t mtk_handle_irq(int irq, void *_eth)
+static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
{
struct mtk_eth *eth = _eth;
- u32 status;
- status = mtk_r32(eth, MTK_QMTK_INT_STATUS);
- if (unlikely(!status))
- return IRQ_NONE;
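+ /* Mask RX interrupts while NAPI runs; mtk_napi_rx() re-enables
+ * them once the ring has been drained.
+ */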
+ if (likely(napi_schedule_prep(&eth->rx_napi))) {
+ __napi_schedule(&eth->rx_napi);
+ mtk_irq_disable(eth, MTK_RX_DONE_INT);
+ }
- if (likely(status & (MTK_RX_DONE_INT | MTK_TX_DONE_INT))) {
- if (likely(napi_schedule_prep(&eth->rx_napi)))
- __napi_schedule(&eth->rx_napi);
- } else {
- mtk_w32(eth, status, MTK_QMTK_INT_STATUS);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
+{
+ struct mtk_eth *eth = _eth;
+
+ if (likely(napi_schedule_prep(&eth->tx_napi))) {
+ __napi_schedule(&eth->tx_napi);
+ mtk_irq_disable(eth, MTK_TX_DONE_INT);
}
- mtk_irq_disable(eth, (MTK_RX_DONE_INT | MTK_TX_DONE_INT));
return IRQ_HANDLED;
}
@@ -1207,7 +1308,7 @@ static void mtk_poll_controller(struct net_device *dev)
u32 int_mask = MTK_TX_DONE_INT | MTK_RX_DONE_INT;
mtk_irq_disable(eth, int_mask);
- mtk_handle_irq(dev->irq, dev);
+ mtk_handle_irq_rx(eth->irq[2], dev);
mtk_irq_enable(eth, int_mask);
}
#endif
@@ -1225,7 +1326,7 @@ static int mtk_start_dma(struct mtk_eth *eth)
mtk_w32(eth,
MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN |
MTK_RX_2B_OFFSET | MTK_DMA_SIZE_16DWORDS |
- MTK_RX_BT_32DWORDS,
+ MTK_RX_BT_32DWORDS | MTK_NDP_CO_PRO,
MTK_QDMA_GLO_CFG);
return 0;
@@ -1243,6 +1344,7 @@ static int mtk_open(struct net_device *dev)
if (err)
return err;
+ napi_enable(&eth->tx_napi);
napi_enable(&eth->rx_napi);
mtk_irq_enable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
}
@@ -1291,6 +1393,7 @@ static int mtk_stop(struct net_device *dev)
return 0;
mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
+ napi_disable(&eth->tx_napi);
napi_disable(&eth->rx_napi);
mtk_stop_dma(eth, MTK_QDMA_GLO_CFG);
@@ -1328,7 +1431,11 @@ static int __init mtk_hw_init(struct mtk_eth *eth)
/* Enable RX VLan Offloading */
mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
- err = devm_request_irq(eth->dev, eth->irq, mtk_handle_irq, 0,
+ err = devm_request_irq(eth->dev, eth->irq[1], mtk_handle_irq_tx, 0,
+ dev_name(eth->dev), eth);
+ if (err)
+ return err;
+ err = devm_request_irq(eth->dev, eth->irq[2], mtk_handle_irq_rx, 0,
dev_name(eth->dev), eth);
if (err)
return err;
@@ -1339,12 +1446,16 @@ static int __init mtk_hw_init(struct mtk_eth *eth)
/* disable delay and normal interrupt */
mtk_w32(eth, 0, MTK_QDMA_DELAY_INT);
- mtk_irq_disable(eth, MTK_TX_DONE_INT | MTK_RX_DONE_INT);
+ mtk_irq_disable(eth, ~0);
mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
mtk_w32(eth, 0, MTK_RST_GL);
/* FE int grouping */
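+ /* Route TX completions to group 1 and RX completions to group 2,
+ * matching the dedicated TX/RX interrupt lines requested above.
+ */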
- mtk_w32(eth, 0, MTK_FE_INT_GRP);
+ mtk_w32(eth, MTK_TX_DONE_INT, MTK_PDMA_INT_GRP1);
+ mtk_w32(eth, MTK_RX_DONE_INT, MTK_PDMA_INT_GRP2);
+ mtk_w32(eth, MTK_TX_DONE_INT, MTK_QDMA_INT_GRP1);
+ mtk_w32(eth, MTK_RX_DONE_INT, MTK_QDMA_INT_GRP2);
+ mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
for (i = 0; i < 2; i++) {
u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
@@ -1390,9 +1501,7 @@ static void mtk_uninit(struct net_device *dev)
struct mtk_eth *eth = mac->hw;
phy_disconnect(mac->phy_dev);
- mtk_mdio_cleanup(eth);
mtk_irq_disable(eth, ~0);
- free_irq(dev->irq, dev);
}
static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -1413,19 +1522,30 @@ static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
static void mtk_pending_work(struct work_struct *work)
{
- struct mtk_mac *mac = container_of(work, struct mtk_mac, pending_work);
- struct mtk_eth *eth = mac->hw;
- struct net_device *dev = eth->netdev[mac->id];
- int err;
+ struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
+ int err, i;
+ unsigned long restart = 0;
rtnl_lock();
- mtk_stop(dev);
- err = mtk_open(dev);
- if (err) {
- netif_alert(eth, ifup, dev,
- "Driver up/down cycle failed, closing device.\n");
- dev_close(dev);
+ /* stop all devices to make sure that dma is properly shut down */
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->netdev[i])
+ continue;
+ mtk_stop(eth->netdev[i]);
+ __set_bit(i, &restart);
+ }
+
+ /* restart DMA and enable IRQs */
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!test_bit(i, &restart))
+ continue;
+ err = mtk_open(eth->netdev[i]);
+ if (err) {
+ netif_alert(eth, ifup, eth->netdev[i],
+ "Driver up/down cycle failed, closing device.\n");
+ dev_close(eth->netdev[i]);
+ }
}
rtnl_unlock();
}
@@ -1435,15 +1555,13 @@ static int mtk_cleanup(struct mtk_eth *eth)
int i;
for (i = 0; i < MTK_MAC_COUNT; i++) {
- struct mtk_mac *mac = netdev_priv(eth->netdev[i]);
-
if (!eth->netdev[i])
continue;
unregister_netdev(eth->netdev[i]);
free_netdev(eth->netdev[i]);
- cancel_work_sync(&mac->pending_work);
}
+ cancel_work_sync(&eth->pending_work);
return 0;
}
@@ -1631,7 +1749,6 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
mac->id = id;
mac->hw = eth;
mac->of_node = np;
- INIT_WORK(&mac->pending_work, mtk_pending_work);
mac->hw_stats = devm_kzalloc(eth->dev,
sizeof(*mac->hw_stats),
@@ -1642,9 +1759,11 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
goto free_netdev;
}
spin_lock_init(&mac->hw_stats->stats_lock);
+ u64_stats_init(&mac->hw_stats->syncp);
mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
SET_NETDEV_DEV(eth->netdev[id], eth->dev);
+ eth->netdev[id]->watchdog_timeo = 5 * HZ;
eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
eth->netdev[id]->base_addr = (unsigned long)eth->base;
eth->netdev[id]->vlan_features = MTK_HW_FEATURES &
@@ -1657,10 +1776,10 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
dev_err(eth->dev, "error bringing up device\n");
goto free_netdev;
}
- eth->netdev[id]->irq = eth->irq;
+ eth->netdev[id]->irq = eth->irq[0];
netif_info(eth, probe, eth->netdev[id],
"mediatek frame engine at 0x%08lx, irq %d\n",
- eth->netdev[id]->base_addr, eth->netdev[id]->irq);
+ eth->netdev[id]->base_addr, eth->irq[0]);
return 0;
@@ -1677,10 +1796,7 @@ static int mtk_probe(struct platform_device *pdev)
struct mtk_soc_data *soc;
struct mtk_eth *eth;
int err;
-
- err = device_reset(&pdev->dev);
- if (err)
- return err;
+ int i;
match = of_match_device(of_mtk_match, &pdev->dev);
soc = (struct mtk_soc_data *)match->data;
@@ -1689,11 +1805,13 @@ static int mtk_probe(struct platform_device *pdev)
if (!eth)
return -ENOMEM;
+ eth->dev = &pdev->dev;
eth->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(eth->base))
return PTR_ERR(eth->base);
spin_lock_init(&eth->page_lock);
+ spin_lock_init(&eth->irq_lock);
eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
"mediatek,ethsys");
@@ -1715,27 +1833,30 @@ static int mtk_probe(struct platform_device *pdev)
return PTR_ERR(eth->rstc);
}
- eth->irq = platform_get_irq(pdev, 0);
- if (eth->irq < 0) {
- dev_err(&pdev->dev, "no IRQ resource found\n");
- return -ENXIO;
+ for (i = 0; i < 3; i++) {
+ eth->irq[i] = platform_get_irq(pdev, i);
+ if (eth->irq[i] < 0) {
+ dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
+ return -ENXIO;
+ }
+ }
+ for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
+ eth->clks[i] = devm_clk_get(eth->dev,
+ mtk_clks_source_name[i]);
+ if (IS_ERR(eth->clks[i])) {
+ if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ return -ENODEV;
+ }
}
- eth->clk_ethif = devm_clk_get(&pdev->dev, "ethif");
- eth->clk_esw = devm_clk_get(&pdev->dev, "esw");
- eth->clk_gp1 = devm_clk_get(&pdev->dev, "gp1");
- eth->clk_gp2 = devm_clk_get(&pdev->dev, "gp2");
- if (IS_ERR(eth->clk_esw) || IS_ERR(eth->clk_gp1) ||
- IS_ERR(eth->clk_gp2) || IS_ERR(eth->clk_ethif))
- return -ENODEV;
-
- clk_prepare_enable(eth->clk_ethif);
- clk_prepare_enable(eth->clk_esw);
- clk_prepare_enable(eth->clk_gp1);
- clk_prepare_enable(eth->clk_gp2);
+ clk_prepare_enable(eth->clks[MTK_CLK_ETHIF]);
+ clk_prepare_enable(eth->clks[MTK_CLK_ESW]);
+ clk_prepare_enable(eth->clks[MTK_CLK_GP1]);
+ clk_prepare_enable(eth->clks[MTK_CLK_GP2]);
- eth->dev = &pdev->dev;
eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
+ INIT_WORK(&eth->pending_work, mtk_pending_work);
err = mtk_hw_init(eth);
if (err)
@@ -1758,7 +1879,9 @@ static int mtk_probe(struct platform_device *pdev)
* for NAPI to work
*/
init_dummy_netdev(&eth->dummy_dev);
- netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_poll,
+ netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx,
+ MTK_NAPI_WEIGHT);
+ netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx,
MTK_NAPI_WEIGHT);
platform_set_drvdata(pdev, eth);
@@ -1773,14 +1896,24 @@ err_free_dev:
static int mtk_remove(struct platform_device *pdev)
{
struct mtk_eth *eth = platform_get_drvdata(pdev);
+ int i;
- clk_disable_unprepare(eth->clk_ethif);
- clk_disable_unprepare(eth->clk_esw);
- clk_disable_unprepare(eth->clk_gp1);
- clk_disable_unprepare(eth->clk_gp2);
+ /* stop all devices to make sure that dma is properly shut down */
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->netdev[i])
+ continue;
+ mtk_stop(eth->netdev[i]);
+ }
+ clk_disable_unprepare(eth->clks[MTK_CLK_ETHIF]);
+ clk_disable_unprepare(eth->clks[MTK_CLK_ESW]);
+ clk_disable_unprepare(eth->clks[MTK_CLK_GP1]);
+ clk_disable_unprepare(eth->clks[MTK_CLK_GP2]);
+
+ netif_napi_del(&eth->tx_napi);
netif_napi_del(&eth->rx_napi);
mtk_cleanup(eth);
+ mtk_mdio_cleanup(eth);
platform_set_drvdata(pdev, NULL);
return 0;
@@ -1790,13 +1923,13 @@ const struct of_device_id of_mtk_match[] = {
{ .compatible = "mediatek,mt7623-eth" },
{},
};
+MODULE_DEVICE_TABLE(of, of_mtk_match);
static struct platform_driver mtk_driver = {
.probe = mtk_probe,
.remove = mtk_remove,
.driver = {
.name = "mtk_soc_eth",
- .owner = THIS_MODULE,
.of_match_table = of_mtk_match,
},
};
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 48a5292c8ed8..6e1ade7a25c5 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -68,6 +68,10 @@
/* Unicast Filter MAC Address Register - High */
#define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000))
+/* PDMA Interrupt grouping registers */
+#define MTK_PDMA_INT_GRP1 0xa50
+#define MTK_PDMA_INT_GRP2 0xa54
+
/* QDMA TX Queue Configuration Registers */
#define MTK_QTX_CFG(x) (0x1800 + (x * 0x10))
#define QDMA_RES_THRES 4
@@ -91,6 +95,7 @@
#define MTK_QDMA_GLO_CFG 0x1A04
#define MTK_RX_2B_OFFSET BIT(31)
#define MTK_RX_BT_32DWORDS (3 << 11)
+#define MTK_NDP_CO_PRO BIT(10)
#define MTK_TX_WB_DDONE BIT(6)
#define MTK_DMA_SIZE_16DWORDS (2 << 4)
#define MTK_RX_DMA_BUSY BIT(3)
@@ -124,6 +129,11 @@
#define MTK_TX_DONE_INT (MTK_TX_DONE_INT0 | MTK_TX_DONE_INT1 | \
MTK_TX_DONE_INT2 | MTK_TX_DONE_INT3)
+/* QDMA Interrupt grouping registers */
+#define MTK_QDMA_INT_GRP1 0x1a20
+#define MTK_QDMA_INT_GRP2 0x1a24
+#define MTK_RLS_DONE_INT BIT(0)
+
/* QDMA Interrupt Status Register */
#define MTK_QDMA_INT_MASK 0x1A1C
@@ -280,6 +290,17 @@ enum mtk_tx_flags {
MTK_TX_FLAGS_PAGE0 = 0x02,
};
+/* This enum maps each required clock to its index in the clks array. */
+enum mtk_clks_map {
+ MTK_CLK_ETHIF,
+ MTK_CLK_ESW,
+ MTK_CLK_GP1,
+ MTK_CLK_GP2,
+ MTK_CLK_MAX
+};
+
/* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
* by the TX descriptors
* @skb: The SKB pointer of the packet being sent
@@ -355,14 +376,14 @@ struct mtk_rx_ring {
* @dma_refcnt: track how many netdevs are using the DMA engine
* @tx_ring: Pointer to the memory holding info about the TX ring
* @rx_ring: Pointer to the memory holding info about the RX ring
- * @rx_napi: The NAPI struct
+ * @tx_napi: The TX NAPI struct
+ * @rx_napi: The RX NAPI struct
* @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
+ * @phy_scratch_ring: physical address of scratch_ring
* @scratch_head: The scratch memory that scratch_ring points to.
- * @clk_ethif: The ethif clock
- * @clk_esw: The switch clock
- * @clk_gp1: The gmac1 clock
- * @clk_gp2: The gmac2 clock
+ * @clks: array of all required clocks
* @mii_bus: If there is a bus we need to create an instance for it
+ * @pending_work: work struct used to reset the DMA ring
*/
struct mtk_eth {
@@ -370,10 +391,11 @@ struct mtk_eth {
void __iomem *base;
struct reset_control *rstc;
spinlock_t page_lock;
+ spinlock_t irq_lock;
struct net_device dummy_dev;
struct net_device *netdev[MTK_MAX_DEVS];
struct mtk_mac *mac[MTK_MAX_DEVS];
- int irq;
+ int irq[3];
u32 msg_enable;
unsigned long sysclk;
struct regmap *ethsys;
@@ -381,14 +403,15 @@ struct mtk_eth {
atomic_t dma_refcnt;
struct mtk_tx_ring tx_ring;
struct mtk_rx_ring rx_ring;
+ struct napi_struct tx_napi;
struct napi_struct rx_napi;
struct mtk_tx_dma *scratch_ring;
+ dma_addr_t phy_scratch_ring;
void *scratch_head;
- struct clk *clk_ethif;
- struct clk *clk_esw;
- struct clk *clk_gp1;
- struct clk *clk_gp2;
+ struct clk *clks[MTK_CLK_MAX];
+
struct mii_bus *mii_bus;
+ struct work_struct pending_work;
};
/* struct mtk_mac - the structure that holds the info about the MACs of the
@@ -398,7 +421,6 @@ struct mtk_eth {
* @hw: Backpointer to our main data structure
* @hw_stats: Packet statistics counter
* @phy_dev: The attached PHY if available
- * @pending_work: The workqueue used to reset the dma ring
*/
struct mtk_mac {
int id;
@@ -406,7 +428,6 @@ struct mtk_mac {
struct mtk_eth *hw;
struct mtk_hw_stats *hw_stats;
struct phy_device *phy_dev;
- struct work_struct pending_work;
};
/* the struct describing the SoC. these are declared in the soc_xyz.c files */
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
index 9ca3734ebb6b..5098e7f21987 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -24,13 +24,6 @@ config MLX4_EN_DCB
If unsure, set to Y
-config MLX4_EN_VXLAN
- bool "VXLAN offloads Support"
- default y
- depends on MLX4_EN && VXLAN && !(MLX4_EN=y && VXLAN=m)
- ---help---
- Say Y here if you want to use VXLAN offloads in the driver.
-
config MLX4_CORE
tristate
depends on PCI
diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c
index 0c51c69f802f..249a4584401a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c
@@ -576,41 +576,48 @@ out:
return res;
}
-/*
- * Handling for queue buffers -- we allocate a bunch of memory and
- * register it in a memory region at HCA virtual address 0. If the
- * requested size is > max_direct, we split the allocation into
- * multiple pages, so we don't require too much contiguous memory.
- */
-int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
- struct mlx4_buf *buf, gfp_t gfp)
+static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size,
+ struct mlx4_buf *buf, gfp_t gfp)
{
dma_addr_t t;
- if (size <= max_direct) {
- buf->nbufs = 1;
- buf->npages = 1;
- buf->page_shift = get_order(size) + PAGE_SHIFT;
- buf->direct.buf = dma_alloc_coherent(&dev->persist->pdev->dev,
- size, &t, gfp);
- if (!buf->direct.buf)
- return -ENOMEM;
+ buf->nbufs = 1;
+ buf->npages = 1;
+ buf->page_shift = get_order(size) + PAGE_SHIFT;
+ buf->direct.buf =
+ dma_zalloc_coherent(&dev->persist->pdev->dev,
+ size, &t, gfp);
+ if (!buf->direct.buf)
+ return -ENOMEM;
- buf->direct.map = t;
+ buf->direct.map = t;
- while (t & ((1 << buf->page_shift) - 1)) {
- --buf->page_shift;
- buf->npages *= 2;
- }
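+ /* Reduce page_shift until the DMA address is aligned to the page
+ * size it implies, doubling npages so the total size is unchanged.
+ */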
+ while (t & ((1 << buf->page_shift) - 1)) {
+ --buf->page_shift;
+ buf->npages *= 2;
+ }
- memset(buf->direct.buf, 0, size);
+ return 0;
+}
+
+/* Handling for queue buffers -- we allocate a bunch of memory and
+ * register it in a memory region at HCA virtual address 0. If the
+ * requested size is > max_direct, we split the allocation into
+ * multiple pages, so we don't require too much contiguous memory.
+ */
+int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
+ struct mlx4_buf *buf, gfp_t gfp)
+{
+ if (size <= max_direct) {
+ return mlx4_buf_direct_alloc(dev, size, buf, gfp);
} else {
+ dma_addr_t t;
int i;
- buf->direct.buf = NULL;
- buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
- buf->npages = buf->nbufs;
+ buf->direct.buf = NULL;
+ buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+ buf->npages = buf->nbufs;
buf->page_shift = PAGE_SHIFT;
buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
gfp);
@@ -619,28 +626,12 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
for (i = 0; i < buf->nbufs; ++i) {
buf->page_list[i].buf =
- dma_alloc_coherent(&dev->persist->pdev->dev,
- PAGE_SIZE,
- &t, gfp);
+ dma_zalloc_coherent(&dev->persist->pdev->dev,
+ PAGE_SIZE, &t, gfp);
if (!buf->page_list[i].buf)
goto err_free;
buf->page_list[i].map = t;
-
- memset(buf->page_list[i].buf, 0, PAGE_SIZE);
- }
-
- if (BITS_PER_LONG == 64) {
- struct page **pages;
- pages = kmalloc(sizeof *pages * buf->nbufs, gfp);
- if (!pages)
- goto err_free;
- for (i = 0; i < buf->nbufs; ++i)
- pages[i] = virt_to_page(buf->page_list[i].buf);
- buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
- kfree(pages);
- if (!buf->direct.buf)
- goto err_free;
}
}
@@ -655,15 +646,11 @@ EXPORT_SYMBOL_GPL(mlx4_buf_alloc);
void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
{
- int i;
-
- if (buf->nbufs == 1)
+ if (buf->nbufs == 1) {
dma_free_coherent(&dev->persist->pdev->dev, size,
- buf->direct.buf,
- buf->direct.map);
- else {
- if (BITS_PER_LONG == 64)
- vunmap(buf->direct.buf);
+ buf->direct.buf, buf->direct.map);
+ } else {
+ int i;
for (i = 0; i < buf->nbufs; ++i)
if (buf->page_list[i].buf)
@@ -789,7 +776,7 @@ void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db)
EXPORT_SYMBOL_GPL(mlx4_db_free);
int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
- int size, int max_direct)
+ int size)
{
int err;
@@ -799,7 +786,7 @@ int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
*wqres->db.db = 0;
- err = mlx4_buf_alloc(dev, size, max_direct, &wqres->buf, GFP_KERNEL);
+ err = mlx4_buf_direct_alloc(dev, size, &wqres->buf, GFP_KERNEL);
if (err)
goto err_db;
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index e94ca1c3fc7c..f04a423ff79d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2597,7 +2597,6 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
priv->cmd.free_head = 0;
sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
- spin_lock_init(&priv->cmd.context_lock);
for (priv->cmd.token_mask = 1;
priv->cmd.token_mask < priv->cmd.max_cmds;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index af975a2b74c6..132cea655920 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -73,22 +73,16 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
*/
set_dev_node(&mdev->dev->persist->pdev->dev, node);
err = mlx4_alloc_hwq_res(mdev->dev, &cq->wqres,
- cq->buf_size, 2 * PAGE_SIZE);
+ cq->buf_size);
set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
if (err)
goto err_cq;
- err = mlx4_en_map_buffer(&cq->wqres.buf);
- if (err)
- goto err_res;
-
cq->buf = (struct mlx4_cqe *)cq->wqres.buf.direct.buf;
*pcq = cq;
return 0;
-err_res:
- mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
err_cq:
kfree(cq);
*pcq = NULL;
@@ -177,7 +171,6 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_cq *cq = *pcq;
- mlx4_en_unmap_buffer(&cq->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
if (mlx4_is_eq_vector_valid(mdev->dev, priv->port, cq->vector) &&
cq->is_tx == RX)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index f01918c63f28..b04760a5034b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -37,6 +37,11 @@
#include "mlx4_en.h"
#include "fw_qos.h"
+enum {
+ MLX4_CEE_STATE_DOWN = 0,
+ MLX4_CEE_STATE_UP = 1,
+};
+
/* Definitions for QCN
*/
@@ -80,13 +85,205 @@ struct mlx4_congestion_control_mb_prio_802_1_qau_statistics {
__be32 reserved3[4];
};
+static u8 mlx4_en_dcbnl_getcap(struct net_device *dev, int capid, u8 *cap)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ switch (capid) {
+ case DCB_CAP_ATTR_PFC:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_DCBX:
+ *cap = priv->dcbx_cap;
+ break;
+ case DCB_CAP_ATTR_PFC_TCS:
+ *cap = 1 << mlx4_max_tc(priv->mdev->dev);
+ break;
+ default:
+ *cap = false;
+ break;
+ }
+
+ return 0;
+}
+
+static u8 mlx4_en_dcbnl_getpfcstate(struct net_device *netdev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(netdev);
+
+ return priv->cee_config.pfc_state;
+}
+
+static void mlx4_en_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
+{
+ struct mlx4_en_priv *priv = netdev_priv(netdev);
+
+ priv->cee_config.pfc_state = state;
+}
+
+static void mlx4_en_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
+ u8 *setting)
+{
+ struct mlx4_en_priv *priv = netdev_priv(netdev);
+
+ *setting = priv->cee_config.dcb_pfc[priority];
+}
+
+static void mlx4_en_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
+ u8 setting)
+{
+ struct mlx4_en_priv *priv = netdev_priv(netdev);
+
+ priv->cee_config.dcb_pfc[priority] = setting;
+ priv->cee_config.pfc_state = true;
+}
+
+static int mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
+{
+ struct mlx4_en_priv *priv = netdev_priv(netdev);
+
+ if (!(priv->flags & MLX4_EN_FLAG_DCB_ENABLED))
+ return -EINVAL;
+
+ if (tcid == DCB_NUMTCS_ATTR_PFC)
+ *num = mlx4_max_tc(priv->mdev->dev);
+ else
+ *num = 0;
+
+ return 0;
+}
+
+static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(netdev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+
+ if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return 1;
+
+ if (priv->cee_config.pfc_state) {
+ int tc;
+
+ priv->prof->rx_pause = 0;
+ priv->prof->tx_pause = 0;
+ for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) {
+ u8 tc_mask = 1 << tc;
+
+ switch (priv->cee_config.dcb_pfc[tc]) {
+ case pfc_disabled:
+ priv->prof->tx_ppp &= ~tc_mask;
+ priv->prof->rx_ppp &= ~tc_mask;
+ break;
+ case pfc_enabled_full:
+ priv->prof->tx_ppp |= tc_mask;
+ priv->prof->rx_ppp |= tc_mask;
+ break;
+ case pfc_enabled_tx:
+ priv->prof->tx_ppp |= tc_mask;
+ priv->prof->rx_ppp &= ~tc_mask;
+ break;
+ case pfc_enabled_rx:
+ priv->prof->tx_ppp &= ~tc_mask;
+ priv->prof->rx_ppp |= tc_mask;
+ break;
+ default:
+ break;
+ }
+ }
+ en_dbg(DRV, priv, "Set pfc on\n");
+ } else {
+ priv->prof->rx_pause = 1;
+ priv->prof->tx_pause = 1;
+ en_dbg(DRV, priv, "Set pfc off\n");
+ }
+
+ if (mlx4_SET_PORT_general(mdev->dev, priv->port,
+ priv->rx_skb_size + ETH_FCS_LEN,
+ priv->prof->tx_pause,
+ priv->prof->tx_ppp,
+ priv->prof->rx_pause,
+ priv->prof->rx_ppp)) {
+ en_err(priv, "Failed setting pause params\n");
+ return 1;
+ }
+
+ return 0;
+}
+
+static u8 mlx4_en_dcbnl_get_state(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ if (priv->flags & MLX4_EN_FLAG_DCB_ENABLED)
+ return MLX4_CEE_STATE_UP;
+
+ return MLX4_CEE_STATE_DOWN;
+}
+
+static u8 mlx4_en_dcbnl_set_state(struct net_device *dev, u8 state)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ int num_tcs = 0;
+
+ if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return 1;
+
+ if (!!(state) == !!(priv->flags & MLX4_EN_FLAG_DCB_ENABLED))
+ return 0;
+
+ if (state) {
+ priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
+ num_tcs = IEEE_8021QAZ_MAX_TCS;
+ } else {
+ priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
+ }
+
+ if (mlx4_en_setup_tc(dev, num_tcs))
+ return 1;
+
+ return 0;
+}
+
+/* On success, returns a non-zero 802.1p user priority bitmap;
+ * otherwise returns 0 as the invalid user priority bitmap to
+ * indicate an error.
+ */
+static int mlx4_en_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
+{
+ struct mlx4_en_priv *priv = netdev_priv(netdev);
+ struct dcb_app app = {
+ .selector = idtype,
+ .protocol = id,
+ };
+ if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return 0;
+
+ return dcb_getapp(netdev, &app);
+}
+
+static int mlx4_en_dcbnl_setapp(struct net_device *netdev, u8 idtype,
+ u16 id, u8 up)
+{
+ struct mlx4_en_priv *priv = netdev_priv(netdev);
+ struct dcb_app app;
+
+ if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return -EINVAL;
+
+ memset(&app, 0, sizeof(struct dcb_app));
+ app.selector = idtype;
+ app.protocol = id;
+ app.priority = up;
+
+ return dcb_setapp(netdev, &app);
+}
+
static int mlx4_en_dcbnl_ieee_getets(struct net_device *dev,
struct ieee_ets *ets)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct ieee_ets *my_ets = &priv->ets;
- /* No IEEE PFC settings available */
if (!my_ets)
return -EINVAL;
@@ -237,18 +434,51 @@ static int mlx4_en_dcbnl_ieee_setpfc(struct net_device *dev,
static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev)
{
- return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ return priv->dcbx_cap;
}
static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct ieee_ets ets = {0};
+ struct ieee_pfc pfc = {0};
+
+ if (mode == priv->dcbx_cap)
+ return 0;
+
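+ /* Firmware-managed DCBX and enabling IEEE and CEE simultaneously
+ * are not supported; the mode must remain host-managed.
+ */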
if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
- (mode & DCB_CAP_DCBX_VER_CEE) ||
- !(mode & DCB_CAP_DCBX_VER_IEEE) ||
+ ((mode & DCB_CAP_DCBX_VER_IEEE) &&
+ (mode & DCB_CAP_DCBX_VER_CEE)) ||
!(mode & DCB_CAP_DCBX_HOST))
- return 1;
+ goto err;
+
+ priv->dcbx_cap = mode;
+
+ ets.ets_cap = IEEE_8021QAZ_MAX_TCS;
+ pfc.pfc_cap = IEEE_8021QAZ_MAX_TCS;
+
+ if (mode & DCB_CAP_DCBX_VER_IEEE) {
+ if (mlx4_en_dcbnl_ieee_setets(dev, &ets))
+ goto err;
+ if (mlx4_en_dcbnl_ieee_setpfc(dev, &pfc))
+ goto err;
+ } else if (mode & DCB_CAP_DCBX_VER_CEE) {
+ if (mlx4_en_dcbnl_set_all(dev))
+ goto err;
+ } else {
+ if (mlx4_en_dcbnl_ieee_setets(dev, &ets))
+ goto err;
+ if (mlx4_en_dcbnl_ieee_setpfc(dev, &pfc))
+ goto err;
+ if (mlx4_en_setup_tc(dev, 0))
+ goto err;
+ }
return 0;
+err:
+ return 1;
}
#define MLX4_RATELIMIT_UNITS_IN_KB 100000 /* rate-limit HW unit in Kbps */
@@ -463,24 +693,46 @@ static int mlx4_en_dcbnl_ieee_getqcnstats(struct net_device *dev,
}
const struct dcbnl_rtnl_ops mlx4_en_dcbnl_ops = {
- .ieee_getets = mlx4_en_dcbnl_ieee_getets,
- .ieee_setets = mlx4_en_dcbnl_ieee_setets,
- .ieee_getmaxrate = mlx4_en_dcbnl_ieee_getmaxrate,
- .ieee_setmaxrate = mlx4_en_dcbnl_ieee_setmaxrate,
- .ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc,
- .ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc,
+ .ieee_getets = mlx4_en_dcbnl_ieee_getets,
+ .ieee_setets = mlx4_en_dcbnl_ieee_setets,
+ .ieee_getmaxrate = mlx4_en_dcbnl_ieee_getmaxrate,
+ .ieee_setmaxrate = mlx4_en_dcbnl_ieee_setmaxrate,
+ .ieee_getqcn = mlx4_en_dcbnl_ieee_getqcn,
+ .ieee_setqcn = mlx4_en_dcbnl_ieee_setqcn,
+ .ieee_getqcnstats = mlx4_en_dcbnl_ieee_getqcnstats,
+ .ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc,
+ .ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc,
+
+ .getstate = mlx4_en_dcbnl_get_state,
+ .setstate = mlx4_en_dcbnl_set_state,
+ .getpfccfg = mlx4_en_dcbnl_get_pfc_cfg,
+ .setpfccfg = mlx4_en_dcbnl_set_pfc_cfg,
+ .setall = mlx4_en_dcbnl_set_all,
+ .getcap = mlx4_en_dcbnl_getcap,
+ .getnumtcs = mlx4_en_dcbnl_getnumtcs,
+ .getpfcstate = mlx4_en_dcbnl_getpfcstate,
+ .setpfcstate = mlx4_en_dcbnl_setpfcstate,
+ .getapp = mlx4_en_dcbnl_getapp,
+ .setapp = mlx4_en_dcbnl_setapp,
.getdcbx = mlx4_en_dcbnl_getdcbx,
.setdcbx = mlx4_en_dcbnl_setdcbx,
- .ieee_getqcn = mlx4_en_dcbnl_ieee_getqcn,
- .ieee_setqcn = mlx4_en_dcbnl_ieee_setqcn,
- .ieee_getqcnstats = mlx4_en_dcbnl_ieee_getqcnstats,
};
const struct dcbnl_rtnl_ops mlx4_en_dcbnl_pfc_ops = {
.ieee_getpfc = mlx4_en_dcbnl_ieee_getpfc,
.ieee_setpfc = mlx4_en_dcbnl_ieee_setpfc,
+ .setstate = mlx4_en_dcbnl_set_state,
+ .getpfccfg = mlx4_en_dcbnl_get_pfc_cfg,
+ .setpfccfg = mlx4_en_dcbnl_set_pfc_cfg,
+ .setall = mlx4_en_dcbnl_set_all,
+ .getnumtcs = mlx4_en_dcbnl_getnumtcs,
+ .getpfcstate = mlx4_en_dcbnl_getpfcstate,
+ .setpfcstate = mlx4_en_dcbnl_setpfcstate,
+ .getapp = mlx4_en_dcbnl_getapp,
+ .setapp = mlx4_en_dcbnl_setapp,
+
.getdcbx = mlx4_en_dcbnl_getdcbx,
.setdcbx = mlx4_en_dcbnl_setdcbx,
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index c761194bb323..bdda17d2ea0f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -362,7 +362,7 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
for (i = 0; i < NUM_MAIN_STATS; i++, bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
- data[index++] = ((unsigned long *)&priv->stats)[i];
+ data[index++] = ((unsigned long *)&dev->stats)[i];
for (i = 0; i < NUM_PORT_STATS; i++, bitmap_iterator_inc(&it))
if (bitmap_iterator_test(&it))
@@ -1042,6 +1042,8 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_port_profile new_prof;
+ struct mlx4_en_priv *tmp;
u32 rx_size, tx_size;
int port_up = 0;
int err = 0;
@@ -1061,22 +1063,25 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
tx_size == priv->tx_ring[0]->size)
return 0;
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
mutex_lock(&mdev->state_lock);
+ memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+ new_prof.tx_ring_size = tx_size;
+ new_prof.rx_ring_size = rx_size;
+ err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+ if (err)
+ goto out;
+
if (priv->port_up) {
port_up = 1;
mlx4_en_stop_port(dev, 1);
}
- mlx4_en_free_resources(priv);
-
- priv->prof->tx_ring_size = tx_size;
- priv->prof->rx_ring_size = rx_size;
+ mlx4_en_safe_replace_resources(priv, tmp);
- err = mlx4_en_alloc_resources(priv);
- if (err) {
- en_err(priv, "Failed reallocating port resources\n");
- goto out;
- }
if (port_up) {
err = mlx4_en_start_port(dev);
if (err)
@@ -1084,8 +1089,8 @@ static int mlx4_en_set_ringparam(struct net_device *dev,
}
err = mlx4_en_moderation_update(priv);
-
out:
+ kfree(tmp);
mutex_unlock(&mdev->state_lock);
return err;
}
@@ -1107,7 +1112,7 @@ static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- return priv->rx_ring_num;
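+ /* The RSS indirection table size must be a power of two; report
+ * the largest one the configured ring count can cover.
+ */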
+ return rounddown_pow_of_two(priv->rx_ring_num);
}
static u32 mlx4_en_get_rxfh_key_size(struct net_device *netdev)
@@ -1141,19 +1146,17 @@ static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
u8 *hfunc)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- struct mlx4_en_rss_map *rss_map = &priv->rss_map;
- int rss_rings;
- size_t n = priv->rx_ring_num;
+ u32 n = mlx4_en_get_rxfh_indir_size(dev);
+ u32 i, rss_rings;
int err = 0;
- rss_rings = priv->prof->rss_rings ?: priv->rx_ring_num;
- rss_rings = 1 << ilog2(rss_rings);
+ rss_rings = priv->prof->rss_rings ?: n;
+ rss_rings = rounddown_pow_of_two(rss_rings);
- while (n--) {
+ for (i = 0; i < n; i++) {
if (!ring_index)
break;
- ring_index[n] = rss_map->qps[n % rss_rings].qpn -
- rss_map->base_qpn;
+ ring_index[i] = i % rss_rings;
}
if (key)
memcpy(key, priv->rss_key, MLX4_EN_RSS_KEY_SIZE);
@@ -1166,6 +1169,7 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
const u8 *key, const u8 hfunc)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ u32 n = mlx4_en_get_rxfh_indir_size(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int port_up = 0;
int err = 0;
@@ -1175,18 +1179,18 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
/* Calculate RSS table size and make sure flows are spread evenly
* between rings
*/
- for (i = 0; i < priv->rx_ring_num; i++) {
+ for (i = 0; i < n; i++) {
if (!ring_index)
- continue;
+ break;
if (i > 0 && !ring_index[i] && !rss_rings)
rss_rings = i;
- if (ring_index[i] != (i % (rss_rings ?: priv->rx_ring_num)))
+ if (ring_index[i] != (i % (rss_rings ?: n)))
return -EINVAL;
}
if (!rss_rings)
- rss_rings = priv->rx_ring_num;
+ rss_rings = n;
/* RSS table size must be an order of 2 */
if (!is_power_of_2(rss_rings))
@@ -1714,6 +1718,8 @@ static int mlx4_en_set_channels(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_port_profile new_prof;
+ struct mlx4_en_priv *tmp;
int port_up = 0;
int err = 0;
@@ -1723,25 +1729,35 @@ static int mlx4_en_set_channels(struct net_device *dev,
!channel->tx_count || !channel->rx_count)
return -EINVAL;
- mutex_lock(&mdev->state_lock);
- if (priv->port_up) {
- port_up = 1;
- mlx4_en_stop_port(dev, 1);
+ if (channel->tx_count * MLX4_EN_NUM_UP <= priv->xdp_ring_num) {
+ en_err(priv, "Minimum %d tx channels required with XDP on\n",
+ priv->xdp_ring_num / MLX4_EN_NUM_UP + 1);
+ return -EINVAL;
}
- mlx4_en_free_resources(priv);
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
- priv->num_tx_rings_p_up = channel->tx_count;
- priv->tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
- priv->rx_ring_num = channel->rx_count;
+ mutex_lock(&mdev->state_lock);
+ memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+ new_prof.num_tx_rings_p_up = channel->tx_count;
+ new_prof.tx_ring_num = channel->tx_count * MLX4_EN_NUM_UP;
+ new_prof.rx_ring_num = channel->rx_count;
- err = mlx4_en_alloc_resources(priv);
- if (err) {
- en_err(priv, "Failed reallocating port resources\n");
+ err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+ if (err)
goto out;
+
+ if (priv->port_up) {
+ port_up = 1;
+ mlx4_en_stop_port(dev, 1);
}
- netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
+ mlx4_en_safe_replace_resources(priv, tmp);
+
+ netif_set_real_num_tx_queues(dev, priv->tx_ring_num -
+ priv->xdp_ring_num);
netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
if (dev->num_tc)
@@ -1757,8 +1773,8 @@ static int mlx4_en_set_channels(struct net_device *dev,
}
err = mlx4_en_moderation_update(priv);
-
out:
+ kfree(tmp);
mutex_unlock(&mdev->state_lock);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index b4b258c8ca47..fedb829276f4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -31,6 +31,7 @@
*
*/
+#include <linux/bpf.h>
#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
@@ -67,6 +68,18 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up)
offset += priv->num_tx_rings_p_up;
}
+#ifdef CONFIG_MLX4_EN_DCB
+ if (!mlx4_is_slave(priv->mdev->dev)) {
+ if (up) {
+ if (priv->dcbx_cap)
+ priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
+ } else {
+ priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
+ priv->cee_config.pfc_state = false;
+ }
+ }
+#endif /* CONFIG_MLX4_EN_DCB */
+
return 0;
}
@@ -406,14 +419,18 @@ static int mlx4_en_vlan_rx_add_vid(struct net_device *dev,
mutex_lock(&mdev->state_lock);
if (mdev->device_up && priv->port_up) {
err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
- if (err)
+ if (err) {
en_err(priv, "Failed configuring VLAN filter\n");
+ goto out;
+ }
}
- if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
- en_dbg(HW, priv, "failed adding vlan %d\n", vid);
- mutex_unlock(&mdev->state_lock);
+ err = mlx4_register_vlan(mdev->dev, priv->port, vid, &idx);
+ if (err)
+ en_dbg(HW, priv, "Failed adding vlan %d\n", vid);
- return 0;
+out:
+ mutex_unlock(&mdev->state_lock);
+ return err;
}
static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
@@ -421,7 +438,7 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
- int err;
+ int err = 0;
en_dbg(HW, priv, "Killing VID:%d\n", vid);
@@ -438,7 +455,7 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
}
mutex_unlock(&mdev->state_lock);
- return 0;
+ return err;
}
static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
@@ -1197,8 +1214,8 @@ static void mlx4_en_netpoll(struct net_device *dev)
struct mlx4_en_cq *cq;
int i;
- for (i = 0; i < priv->rx_ring_num; i++) {
- cq = priv->rx_cq[i];
+ for (i = 0; i < priv->tx_ring_num; i++) {
+ cq = priv->tx_cq[i];
napi_schedule(&cq->napi);
}
}
@@ -1296,15 +1313,16 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
}
-static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *
+mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
spin_lock_bh(&priv->stats_lock);
- memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
+ netdev_stats_to_stats64(stats, &dev->stats);
spin_unlock_bh(&priv->stats_lock);
- return &priv->ret_stats;
+ return stats;
}
static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
@@ -1505,6 +1523,24 @@ static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask);
}
+static void mlx4_en_init_recycle_ring(struct mlx4_en_priv *priv,
+ int tx_ring_idx)
+{
+ struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[tx_ring_idx];
+ int rr_index;
+
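+ /* The last xdp_ring_num TX rings are dedicated to XDP; pair each
+ * with the RX ring it serves so frames can be recycled in place.
+ */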
+ rr_index = (priv->xdp_ring_num - priv->tx_ring_num) + tx_ring_idx;
+ if (rr_index >= 0) {
+ tx_ring->free_tx_desc = mlx4_en_recycle_tx_desc;
+ tx_ring->recycle_ring = priv->rx_ring[rr_index];
+ en_dbg(DRV, priv,
+ "Set tx_ring[%d]->recycle_ring = rx_ring[%d]\n",
+ tx_ring_idx, rr_index);
+ } else {
+ tx_ring->recycle_ring = NULL;
+ }
+}
+
int mlx4_en_start_port(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -1627,6 +1663,8 @@ int mlx4_en_start_port(struct net_device *dev)
}
tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
+ mlx4_en_init_recycle_ring(priv, i);
+
/* Arm CQ for TX completions */
mlx4_en_arm_cq(priv, cq);
@@ -1691,10 +1729,9 @@ int mlx4_en_start_port(struct net_device *dev)
/* Schedule multicast task to populate multicast list */
queue_work(mdev->workqueue, &priv->rx_mode_task);
-#ifdef CONFIG_MLX4_EN_VXLAN
if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
- vxlan_get_rx_port(dev);
-#endif
+ udp_tunnel_get_rx_info(dev);
+
priv->port_up = true;
netif_tx_start_all_queues(dev);
netif_device_attach(dev);
@@ -1856,6 +1893,7 @@ static void mlx4_en_restart(struct work_struct *work)
en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);
+ rtnl_lock();
mutex_lock(&mdev->state_lock);
if (priv->port_up) {
mlx4_en_stop_port(dev, 1);
@@ -1863,6 +1901,7 @@ static void mlx4_en_restart(struct work_struct *work)
en_err(priv, "Failed restarting port %d\n", priv->port);
}
mutex_unlock(&mdev->state_lock);
+ rtnl_unlock();
}
static void mlx4_en_clear_stats(struct net_device *dev)
@@ -1874,7 +1913,6 @@ static void mlx4_en_clear_stats(struct net_device *dev)
if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
en_dbg(HW, priv, "Failed dumping statistics\n");
- memset(&priv->stats, 0, sizeof(priv->stats));
memset(&priv->pstats, 0, sizeof(priv->pstats));
memset(&priv->pkstats, 0, sizeof(priv->pkstats));
memset(&priv->port_stats, 0, sizeof(priv->port_stats));
@@ -1890,6 +1928,11 @@ static void mlx4_en_clear_stats(struct net_device *dev)
priv->tx_ring[i]->bytes = 0;
priv->tx_ring[i]->packets = 0;
priv->tx_ring[i]->tx_csum = 0;
+ priv->tx_ring[i]->tx_dropped = 0;
+ priv->tx_ring[i]->queue_stopped = 0;
+ priv->tx_ring[i]->wake_queue = 0;
+ priv->tx_ring[i]->tso_packets = 0;
+ priv->tx_ring[i]->xmit_more = 0;
}
for (i = 0; i < priv->rx_ring_num; i++) {
priv->rx_ring[i]->bytes = 0;
@@ -1943,7 +1986,7 @@ static int mlx4_en_close(struct net_device *dev)
return 0;
}
-void mlx4_en_free_resources(struct mlx4_en_priv *priv)
+static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
int i;
@@ -1968,7 +2011,7 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
}
-int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
+static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
struct mlx4_en_port_profile *prof = priv->prof;
int i;
@@ -2025,11 +2068,91 @@ err:
return -ENOMEM;
}
+static void mlx4_en_shutdown(struct net_device *dev)
+{
+ rtnl_lock();
+ netif_device_detach(dev);
+ mlx4_en_close(dev);
+ rtnl_unlock();
+}
+
+static int mlx4_en_copy_priv(struct mlx4_en_priv *dst,
+ struct mlx4_en_priv *src,
+ struct mlx4_en_port_profile *prof)
+{
+ memcpy(&dst->hwtstamp_config, &prof->hwtstamp_config,
+ sizeof(dst->hwtstamp_config));
+ dst->num_tx_rings_p_up = src->mdev->profile.num_tx_rings_p_up;
+ dst->tx_ring_num = prof->tx_ring_num;
+ dst->rx_ring_num = prof->rx_ring_num;
+ dst->flags = prof->flags;
+ dst->mdev = src->mdev;
+ dst->port = src->port;
+ dst->dev = src->dev;
+ dst->prof = prof;
+ dst->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
+ DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
+
+ dst->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
+ GFP_KERNEL);
+ if (!dst->tx_ring)
+ return -ENOMEM;
+
+ dst->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
+ GFP_KERNEL);
+ if (!dst->tx_cq) {
+ kfree(dst->tx_ring);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
+ struct mlx4_en_priv *src)
+{
+ memcpy(dst->rx_ring, src->rx_ring,
+ sizeof(struct mlx4_en_rx_ring *) * src->rx_ring_num);
+ memcpy(dst->rx_cq, src->rx_cq,
+ sizeof(struct mlx4_en_cq *) * src->rx_ring_num);
+ memcpy(&dst->hwtstamp_config, &src->hwtstamp_config,
+ sizeof(dst->hwtstamp_config));
+ dst->tx_ring_num = src->tx_ring_num;
+ dst->rx_ring_num = src->rx_ring_num;
+ dst->tx_ring = src->tx_ring;
+ dst->tx_cq = src->tx_cq;
+ memcpy(dst->prof, src->prof, sizeof(struct mlx4_en_port_profile));
+}
+
+int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
+ struct mlx4_en_priv *tmp,
+ struct mlx4_en_port_profile *prof)
+{
+ mlx4_en_copy_priv(tmp, priv, prof);
+
+ if (mlx4_en_alloc_resources(tmp)) {
+ en_warn(priv,
+ "%s: Resource allocation failed, using previous configuration\n",
+ __func__);
+ kfree(tmp->tx_ring);
+ kfree(tmp->tx_cq);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
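+/* Callers hold mdev->state_lock with the port stopped, so the old
+ * rings can be freed and the new ones swapped in without racing the
+ * data path.
+ */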
+void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
+ struct mlx4_en_priv *tmp)
+{
+ mlx4_en_free_resources(priv);
+ mlx4_en_update_priv(priv, tmp);
+}
void mlx4_en_destroy_netdev(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ bool shutdown = mdev->dev->persist->interface_state &
+ MLX4_INTERFACE_STATE_SHUTDOWN;
en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
@@ -2037,7 +2160,10 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
if (priv->registered) {
devlink_port_type_clear(mlx4_get_devlink_port(mdev->dev,
priv->port));
- unregister_netdev(dev);
+ if (shutdown)
+ mlx4_en_shutdown(dev);
+ else
+ unregister_netdev(dev);
}
if (priv->allocated)
@@ -2057,12 +2183,17 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
mdev->upper[priv->port] = NULL;
mutex_unlock(&mdev->state_lock);
+#ifdef CONFIG_RFS_ACCEL
+ mlx4_en_cleanup_filters(priv);
+#endif
+
mlx4_en_free_resources(priv);
kfree(priv->tx_ring);
kfree(priv->tx_cq);
- free_netdev(dev);
+ if (!shutdown)
+ free_netdev(dev);
}
static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
@@ -2078,6 +2209,11 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
en_err(priv, "Bad MTU size:%d.\n", new_mtu);
return -EPERM;
}
+ if (priv->xdp_ring_num && MLX4_EN_EFF_MTU(new_mtu) > FRAG_SZ0) {
+ en_err(priv, "MTU size:%d requires frags but XDP running\n",
+ new_mtu);
+ return -EOPNOTSUPP;
+ }
dev->mtu = new_mtu;
if (netif_running(dev)) {
@@ -2335,7 +2471,6 @@ static int mlx4_en_get_phys_port_id(struct net_device *dev,
return 0;
}
-#ifdef CONFIG_MLX4_EN_VXLAN
static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
{
int ret;
@@ -2355,8 +2490,12 @@ out:
}
/* set offloads */
- priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
- NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
+ priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM |
+ NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL;
}
static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
@@ -2365,8 +2504,12 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
vxlan_del_task);
/* unset offloads */
- priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
- NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL);
+ priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM |
+ NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL);
ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
VXLAN_STEER_BY_OUTER_MAC, 0);
@@ -2377,15 +2520,19 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
}
static void mlx4_en_add_vxlan_port(struct net_device *dev,
- sa_family_t sa_family, __be16 port)
+ struct udp_tunnel_info *ti)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ __be16 port = ti->port;
__be16 current_port;
- if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
return;
- if (sa_family == AF_INET6)
+ if (ti->sa_family != AF_INET)
+ return;
+
+ if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
return;
current_port = priv->vxlan_port;
@@ -2400,15 +2547,19 @@ static void mlx4_en_add_vxlan_port(struct net_device *dev,
}
static void mlx4_en_del_vxlan_port(struct net_device *dev,
- sa_family_t sa_family, __be16 port)
+ struct udp_tunnel_info *ti)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ __be16 port = ti->port;
__be16 current_port;
- if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
return;
- if (sa_family == AF_INET6)
+ if (ti->sa_family != AF_INET)
+ return;
+
+ if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
return;
current_port = priv->vxlan_port;
@@ -2425,9 +2576,24 @@ static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
netdev_features_t features)
{
features = vlan_features_check(skb, features);
- return vxlan_features_check(skb, features);
+ features = vxlan_features_check(skb, features);
+
+ /* The ConnectX-3 doesn't support outer IPv6 checksums but it does
+ * support inner IPv6 checksums and segmentation, so we need to
+ * strip those features if this is an IPv6 encapsulated frame.
+ */
+ if (skb->encapsulation &&
+ (skb->ip_summed == CHECKSUM_PARTIAL)) {
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ if (!priv->vxlan_port ||
+ (ip_hdr(skb)->version != 4) ||
+ (udp_hdr(skb)->dest != priv->vxlan_port))
+ features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+ }
+
+ return features;
}
-#endif
static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate)
{
@@ -2456,12 +2622,109 @@ static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 m
return err;
}
+static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ struct bpf_prog *old_prog;
+ int xdp_ring_num;
+ int port_up = 0;
+ int err;
+ int i;
+
+ xdp_ring_num = prog ? ALIGN(priv->rx_ring_num, MLX4_EN_NUM_UP) : 0;
+
+ /* No need to reconfigure buffers when simply swapping the
+ * program for a new one.
+ */
+ if (priv->xdp_ring_num == xdp_ring_num) {
+ if (prog) {
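+ /* Take one reference per RX ring; the reference passed
+ * in by the caller covers the first ring.
+ */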
+ prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
+ if (IS_ERR(prog))
+ return PTR_ERR(prog);
+ }
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ /* This xchg is paired with READ_ONCE in the fastpath */
+ old_prog = xchg(&priv->rx_ring[i]->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+ }
+ return 0;
+ }
+
+ if (priv->num_frags > 1) {
+ en_err(priv, "Cannot set XDP if MTU requires multiple frags\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (priv->tx_ring_num < xdp_ring_num + MLX4_EN_NUM_UP) {
+ en_err(priv,
+ "Minimum %d tx channels required to run XDP\n",
+ (xdp_ring_num + MLX4_EN_NUM_UP) / MLX4_EN_NUM_UP);
+ return -EINVAL;
+ }
+
+ if (prog) {
+ prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
+ if (IS_ERR(prog))
+ return PTR_ERR(prog);
+ }
+
+ mutex_lock(&mdev->state_lock);
+ if (priv->port_up) {
+ port_up = 1;
+ mlx4_en_stop_port(dev, 1);
+ }
+
+ priv->xdp_ring_num = xdp_ring_num;
+ netif_set_real_num_tx_queues(dev, priv->tx_ring_num -
+ priv->xdp_ring_num);
+
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ old_prog = xchg(&priv->rx_ring[i]->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+ }
+
+ if (port_up) {
+ err = mlx4_en_start_port(dev);
+ if (err) {
+ en_err(priv, "Failed starting port %d for XDP change\n",
+ priv->port);
+ queue_work(mdev->workqueue, &priv->watchdog_task);
+ }
+ }
+
+ mutex_unlock(&mdev->state_lock);
+ return 0;
+}
+
+static bool mlx4_xdp_attached(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ return !!priv->xdp_ring_num;
+}
+
+static int mlx4_xdp(struct net_device *dev, struct netdev_xdp *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return mlx4_xdp_set(dev, xdp->prog);
+ case XDP_QUERY_PROG:
+ xdp->prog_attached = mlx4_xdp_attached(dev);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
static const struct net_device_ops mlx4_netdev_ops = {
.ndo_open = mlx4_en_open,
.ndo_stop = mlx4_en_close,
.ndo_start_xmit = mlx4_en_xmit,
.ndo_select_queue = mlx4_en_select_queue,
- .ndo_get_stats = mlx4_en_get_stats,
+ .ndo_get_stats64 = mlx4_en_get_stats64,
.ndo_set_rx_mode = mlx4_en_set_rx_mode,
.ndo_set_mac_address = mlx4_en_set_mac,
.ndo_validate_addr = eth_validate_addr,
@@ -2480,12 +2743,11 @@ static const struct net_device_ops mlx4_netdev_ops = {
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
.ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
-#ifdef CONFIG_MLX4_EN_VXLAN
- .ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
- .ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
+ .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port,
+ .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port,
.ndo_features_check = mlx4_en_features_check,
-#endif
.ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
+ .ndo_xdp = mlx4_xdp,
};
static const struct net_device_ops mlx4_netdev_ops_master = {
@@ -2493,7 +2755,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
.ndo_stop = mlx4_en_close,
.ndo_start_xmit = mlx4_en_xmit,
.ndo_select_queue = mlx4_en_select_queue,
- .ndo_get_stats = mlx4_en_get_stats,
+ .ndo_get_stats64 = mlx4_en_get_stats64,
.ndo_set_rx_mode = mlx4_en_set_rx_mode,
.ndo_set_mac_address = mlx4_en_set_mac,
.ndo_validate_addr = eth_validate_addr,
@@ -2518,12 +2780,11 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
.ndo_rx_flow_steer = mlx4_en_filter_rfs,
#endif
.ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
-#ifdef CONFIG_MLX4_EN_VXLAN
- .ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
- .ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
+ .ndo_udp_tunnel_add = mlx4_en_add_vxlan_port,
+ .ndo_udp_tunnel_del = mlx4_en_del_vxlan_port,
.ndo_features_check = mlx4_en_features_check,
-#endif
.ndo_set_tx_maxrate = mlx4_en_set_tx_maxrate,
+ .ndo_xdp = mlx4_xdp,
};
struct mlx4_en_bond {
@@ -2813,10 +3074,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
-#ifdef CONFIG_MLX4_EN_VXLAN
INIT_WORK(&priv->vxlan_add_task, mlx4_en_add_vxlan_offloads);
INIT_WORK(&priv->vxlan_del_task, mlx4_en_del_vxlan_offloads);
-#endif
#ifdef CONFIG_RFS_ACCEL
INIT_LIST_HEAD(&priv->filters);
spin_lock_init(&priv->filters_lock);
@@ -2856,6 +3115,14 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->msg_enable = MLX4_EN_MSG_LEVEL;
#ifdef CONFIG_MLX4_EN_DCB
if (!mlx4_is_slave(priv->mdev->dev)) {
+ priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST |
+ DCB_CAP_DCBX_VER_IEEE;
+ priv->flags |= MLX4_EN_DCB_ENABLED;
+ priv->cee_config.pfc_state = false;
+
+ for (i = 0; i < MLX4_EN_NUM_UP; i++)
+ priv->cee_config.dcb_pfc[i] = pfc_disabled;
+
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
} else {
@@ -2907,7 +3174,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
/* Allocate page for receive rings */
err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
- MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
+ MLX4_EN_PAGE_SIZE);
if (err) {
en_err(priv, "Failed to allocate page for rx qps\n");
goto out;
@@ -2990,8 +3257,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
}
if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
- dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
- dev->features |= NETIF_F_GSO_UDP_TUNNEL;
+ dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL;
+ dev->features |= NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL;
+ dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
}
mdev->pndev[port] = dev;
@@ -3071,6 +3343,8 @@ int mlx4_en_reset_config(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
+ struct mlx4_en_port_profile new_prof;
+ struct mlx4_en_priv *tmp;
int port_up = 0;
int err = 0;
@@ -3087,19 +3361,29 @@ int mlx4_en_reset_config(struct net_device *dev,
return -EINVAL;
}
+ tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return -ENOMEM;
+
mutex_lock(&mdev->state_lock);
+
+ memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+ memcpy(&new_prof.hwtstamp_config, &ts_config, sizeof(ts_config));
+
+ err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+ if (err)
+ goto out;
+
if (priv->port_up) {
port_up = 1;
mlx4_en_stop_port(dev, 1);
}
- mlx4_en_free_resources(priv);
-
en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n",
- ts_config.rx_filter, !!(features & NETIF_F_HW_VLAN_CTAG_RX));
+ ts_config.rx_filter,
+ !!(features & NETIF_F_HW_VLAN_CTAG_RX));
- priv->hwtstamp_config.tx_type = ts_config.tx_type;
- priv->hwtstamp_config.rx_filter = ts_config.rx_filter;
+ mlx4_en_safe_replace_resources(priv, tmp);
if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
if (features & NETIF_F_HW_VLAN_CTAG_RX)
@@ -3133,11 +3417,6 @@ int mlx4_en_reset_config(struct net_device *dev,
dev->features &= ~NETIF_F_HW_VLAN_CTAG_RX;
}
- err = mlx4_en_alloc_resources(priv);
- if (err) {
- en_err(priv, "Failed reallocating port resources\n");
- goto out;
- }
if (port_up) {
err = mlx4_en_start_port(dev);
if (err)
@@ -3146,6 +3425,8 @@ int mlx4_en_reset_config(struct net_device *dev,
out:
mutex_unlock(&mdev->state_lock);
- netdev_features_change(dev);
+ kfree(tmp);
+ if (!err)
+ netdev_features_change(dev);
return err;
}
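
The rework above follows an allocate-then-swap discipline: the new ring/CQ set is built into a scratch priv first, and the live set is torn down only once the replacement is known to exist, so a failed reconfiguration leaves the port untouched. A minimal user-space sketch of the pattern (illustrative names, not the driver's):

/* Allocate-then-swap (sketch): a failed reconfiguration must leave the
 * live resource set untouched; only a successful allocation is swapped in.
 */
struct resources { int dummy; /* stands in for rings, CQs, ... */ };

static int try_alloc(struct resources *tmp)
{
    tmp->dummy = 1;     /* stand-in for ring/CQ allocation; may fail */
    return 0;
}

static void safe_replace(struct resources *live, struct resources *tmp)
{
    struct resources old = *live;

    *live = *tmp;       /* publish the new set */
    *tmp = old;         /* the old set is released along with tmp */
}

static int reconfigure(struct resources *live)
{
    struct resources tmp = {0};
    int err = try_alloc(&tmp);

    if (err)
        return err;     /* live set never touched on failure */
    safe_replace(live, &tmp);
    /* freeing tmp here releases the old resources */
    return 0;
}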
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 20b6c2e678b8..5aa8b751f417 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -152,8 +152,9 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
struct mlx4_counter tmp_counter_stats;
struct mlx4_en_stat_out_mbox *mlx4_en_stats;
struct mlx4_en_stat_out_flow_control_mbox *flowstats;
- struct mlx4_en_priv *priv = netdev_priv(mdev->pndev[port]);
- struct net_device_stats *stats = &priv->stats;
+ struct net_device *dev = mdev->pndev[port];
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
struct mlx4_cmd_mailbox *mailbox;
u64 in_mod = reset << 8 | port;
int err;
@@ -188,6 +189,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
}
stats->tx_packets = 0;
stats->tx_bytes = 0;
+ stats->tx_dropped = 0;
priv->port_stats.tx_chksum_offload = 0;
priv->port_stats.queue_stopped = 0;
priv->port_stats.wake_queue = 0;
@@ -199,6 +201,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
stats->tx_packets += ring->packets;
stats->tx_bytes += ring->bytes;
+ stats->tx_dropped += ring->tx_dropped;
priv->port_stats.tx_chksum_offload += ring->tx_csum;
priv->port_stats.queue_stopped += ring->queue_stopped;
priv->port_stats.wake_queue += ring->wake_queue;
@@ -237,21 +240,12 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
stats->multicast = en_stats_adder(&mlx4_en_stats->MCAST_prio_0,
&mlx4_en_stats->MCAST_prio_1,
NUM_PRIORITIES);
- stats->collisions = 0;
stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP) +
sw_rx_dropped;
stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
- stats->rx_over_errors = 0;
stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
- stats->rx_frame_errors = 0;
stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
- stats->rx_missed_errors = 0;
- stats->tx_aborted_errors = 0;
- stats->tx_carrier_errors = 0;
- stats->tx_fifo_errors = 0;
- stats->tx_heartbeat_errors = 0;
- stats->tx_window_errors = 0;
- stats->tx_dropped = be32_to_cpu(mlx4_en_stats->TDROP);
+ stats->tx_dropped += be32_to_cpu(mlx4_en_stats->TDROP);
/* RX stats */
priv->pkstats.rx_multicast_packets = stats->multicast;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index 02e925d6f734..a6b0db0e0383 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -107,37 +107,6 @@ int mlx4_en_change_mcast_lb(struct mlx4_en_priv *priv, struct mlx4_qp *qp,
return ret;
}
-int mlx4_en_map_buffer(struct mlx4_buf *buf)
-{
- struct page **pages;
- int i;
-
- if (BITS_PER_LONG == 64 || buf->nbufs == 1)
- return 0;
-
- pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
- if (!pages)
- return -ENOMEM;
-
- for (i = 0; i < buf->nbufs; ++i)
- pages[i] = virt_to_page(buf->page_list[i].buf);
-
- buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
- kfree(pages);
- if (!buf->direct.buf)
- return -ENOMEM;
-
- return 0;
-}
-
-void mlx4_en_unmap_buffer(struct mlx4_buf *buf)
-{
- if (BITS_PER_LONG == 64 || buf->nbufs == 1)
- return;
-
- vunmap(buf->direct.buf);
-}
-
void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event)
{
return;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index ca3a38421ee7..2040dad8611d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -32,6 +32,7 @@
*/
#include <net/busy_poll.h>
+#include <linux/bpf.h>
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
@@ -57,7 +58,7 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
struct page *page;
dma_addr_t dma;
- for (order = MLX4_EN_ALLOC_PREFER_ORDER; ;) {
+ for (order = frag_info->order; ;) {
gfp_t gfp = _gfp;
if (order)
@@ -70,7 +71,7 @@ static int mlx4_alloc_pages(struct mlx4_en_priv *priv,
return -ENOMEM;
}
dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE << order,
- PCI_DMA_FROMDEVICE);
+ frag_info->dma_dir);
if (dma_mapping_error(priv->ddev, dma)) {
put_page(page);
return -ENOMEM;
@@ -124,7 +125,8 @@ out:
while (i--) {
if (page_alloc[i].page != ring_alloc[i].page) {
dma_unmap_page(priv->ddev, page_alloc[i].dma,
- page_alloc[i].page_size, PCI_DMA_FROMDEVICE);
+ page_alloc[i].page_size,
+ priv->frag_info[i].dma_dir);
page = page_alloc[i].page;
/* Revert changes done by mlx4_alloc_pages */
page_ref_sub(page, page_alloc[i].page_size /
@@ -145,7 +147,7 @@ static void mlx4_en_free_frag(struct mlx4_en_priv *priv,
if (next_frag_end > frags[i].page_size)
dma_unmap_page(priv->ddev, frags[i].dma, frags[i].page_size,
- PCI_DMA_FROMDEVICE);
+ frag_info->dma_dir);
if (frags[i].page)
put_page(frags[i].page);
@@ -176,7 +178,8 @@ out:
page_alloc = &ring->page_alloc[i];
dma_unmap_page(priv->ddev, page_alloc->dma,
- page_alloc->page_size, PCI_DMA_FROMDEVICE);
+ page_alloc->page_size,
+ priv->frag_info[i].dma_dir);
page = page_alloc->page;
/* Revert changes done by mlx4_alloc_pages */
page_ref_sub(page, page_alloc->page_size /
@@ -201,7 +204,7 @@ static void mlx4_en_destroy_allocator(struct mlx4_en_priv *priv,
i, page_count(page_alloc->page));
dma_unmap_page(priv->ddev, page_alloc->dma,
- page_alloc->page_size, PCI_DMA_FROMDEVICE);
+ page_alloc->page_size, frag_info->dma_dir);
while (page_alloc->page_offset + frag_info->frag_stride <
page_alloc->page_size) {
put_page(page_alloc->page);
@@ -244,6 +247,12 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
struct mlx4_en_rx_alloc *frags = ring->rx_info +
(index << priv->log_rx_info);
+ if (ring->page_cache.index > 0) {
+ frags[0] = ring->page_cache.buf[--ring->page_cache.index];
+ rx_desc->data[0].addr = cpu_to_be64(frags[0].dma);
+ return 0;
+ }
+
return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp);
}
@@ -394,17 +403,11 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
/* Allocate HW buffers on provided NUMA node */
set_dev_node(&mdev->dev->persist->pdev->dev, node);
- err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres,
- ring->buf_size, 2 * PAGE_SIZE);
+ err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
if (err)
goto err_info;
- err = mlx4_en_map_buffer(&ring->wqres.buf);
- if (err) {
- en_err(priv, "Failed to map RX buffer\n");
- goto err_hwq;
- }
ring->buf = ring->wqres.buf.direct.buf;
ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter;
@@ -412,8 +415,6 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
*pring = ring;
return 0;
-err_hwq:
- mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_info:
vfree(ring->rx_info);
ring->rx_info = NULL;
@@ -510,27 +511,55 @@ void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
}
}
+/* When the rx ring is running in page-per-packet mode, a released frame can go
+ * directly into a small cache, to avoid unmapping or touching the page
+ * allocator. In bpf prog performance scenarios, buffers are either forwarded
+ * or dropped, never converted to skbs, so every page can come directly from
+ * this cache when it is sized to be a multiple of the napi budget.
+ */
+bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
+ struct mlx4_en_rx_alloc *frame)
+{
+ struct mlx4_en_page_cache *cache = &ring->page_cache;
+
+ if (cache->index >= MLX4_EN_CACHE_SIZE)
+ return false;
+
+ cache->buf[cache->index++] = *frame;
+ return true;
+}
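
The cache is a fixed-size LIFO of (page, dma) frames; the refill path pops from it in mlx4_en_prepare_rx_desc() before falling back to the page allocator. A self-contained model of those semantics (hypothetical names; the driver's entries also carry DMA addresses and offsets):

/* Illustrative user-space model of the ring's LIFO page cache. */
#include <stdbool.h>

#define CACHE_SIZE 128            /* 2 * NAPI_POLL_WEIGHT in the driver */

struct frame { void *page; };

struct page_cache {
    unsigned int index;           /* number of cached frames */
    struct frame buf[CACHE_SIZE];
};

/* Push on free: succeeds unless the cache is full. */
static bool cache_put(struct page_cache *c, const struct frame *f)
{
    if (c->index >= CACHE_SIZE)
        return false;             /* caller unmaps and frees the page */
    c->buf[c->index++] = *f;
    return true;
}

/* Pop on refill: the most recently used page comes back first (LIFO). */
static bool cache_get(struct page_cache *c, struct frame *f)
{
    if (c->index == 0)
        return false;             /* fall back to the page allocator */
    *f = c->buf[--c->index];
    return true;
}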
+
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring **pring,
u32 size, u16 stride)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_rx_ring *ring = *pring;
+ struct bpf_prog *old_prog;
- mlx4_en_unmap_buffer(&ring->wqres.buf);
+ old_prog = READ_ONCE(ring->xdp_prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
vfree(ring->rx_info);
ring->rx_info = NULL;
kfree(ring);
*pring = NULL;
-#ifdef CONFIG_RFS_ACCEL
- mlx4_en_cleanup_filters(priv);
-#endif
}
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring)
{
+ int i;
+
+ for (i = 0; i < ring->page_cache.index; i++) {
+ struct mlx4_en_rx_alloc *frame = &ring->page_cache.buf[i];
+
+ dma_unmap_page(priv->ddev, frame->dma, frame->page_size,
+ priv->frag_info[0].dma_dir);
+ put_page(frame->page);
+ }
+ ring->page_cache.index = 0;
mlx4_en_free_rx_buf(priv, ring);
if (ring->stride <= TXBB_SIZE)
ring->buf -= TXBB_SIZE;
@@ -752,7 +781,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
struct mlx4_en_rx_alloc *frags;
struct mlx4_en_rx_desc *rx_desc;
+ struct bpf_prog *xdp_prog;
+ int doorbell_pending;
struct sk_buff *skb;
+ int tx_index;
int index;
int nr;
unsigned int length;
@@ -768,6 +800,10 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
if (budget <= 0)
return polled;
+ xdp_prog = READ_ONCE(ring->xdp_prog);
+ doorbell_pending = 0;
+ tx_index = (priv->tx_ring_num - priv->xdp_ring_num) + cq->ring;
+
/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
* descriptor offset can be deduced from the CQE index instead of
* reading 'cqe->index' */
@@ -844,6 +880,43 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
(cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
+ /* A bpf program gets first chance to drop the packet. It may
+ * read bytes but not past the end of the frag.
+ */
+ if (xdp_prog) {
+ struct xdp_buff xdp;
+ dma_addr_t dma;
+ u32 act;
+
+ dma = be64_to_cpu(rx_desc->data[0].addr);
+ dma_sync_single_for_cpu(priv->ddev, dma,
+ priv->frag_info[0].frag_size,
+ DMA_FROM_DEVICE);
+
+ xdp.data = page_address(frags[0].page) +
+ frags[0].page_offset;
+ xdp.data_end = xdp.data + length;
+
+ act = bpf_prog_run_xdp(xdp_prog, &xdp);
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ if (!mlx4_en_xmit_frame(frags, dev,
+ length, tx_index,
+ &doorbell_pending))
+ goto consumed;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ case XDP_ABORTED:
+ case XDP_DROP:
+ if (mlx4_en_rx_recycle(ring, frags))
+ goto consumed;
+ goto next;
+ }
+ }
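
For reference, the smallest program this hook can run is one that unconditionally returns a verdict from the switch above. A drop-everything XDP program would look roughly like this (header paths and build line are assumptions; their location varied between samples/bpf and libbpf across kernel releases):

/* drop_all.c - minimal XDP program exercising the XDP_DROP branch.
 * Assumed build: clang -O2 -target bpf -c drop_all.c -o drop_all.o
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>    /* for SEC(); header location is an assumption */

SEC("xdp")
int xdp_drop_all(struct xdp_md *ctx)
{
    return XDP_DROP;    /* driver then recycles the page via mlx4_en_rx_recycle() */
}

char _license[] SEC("license") = "GPL";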
+
if (likely(dev->features & NETIF_F_RXCSUM)) {
if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
MLX4_CQE_STATUS_UDP)) {
@@ -995,6 +1068,7 @@ next:
for (nr = 0; nr < priv->num_frags; nr++)
mlx4_en_free_frag(priv, frags, nr);
+consumed:
++cq->mcq.cons_index;
index = (cq->mcq.cons_index) & ring->size_mask;
cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;
@@ -1003,6 +1077,9 @@ next:
}
out:
+ if (doorbell_pending)
+ mlx4_en_xmit_doorbell(priv->tx_ring[tx_index]);
+
AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);
mlx4_cq_set_ci(&cq->mcq);
wmb(); /* ensure HW sees CQ consumer before we post new buffers */
@@ -1070,22 +1147,35 @@ static const int frag_sizes[] = {
void mlx4_en_calc_rx_buf(struct net_device *dev)
{
+ enum dma_data_direction dma_dir = PCI_DMA_FROMDEVICE;
struct mlx4_en_priv *priv = netdev_priv(dev);
- /* VLAN_HLEN is added twice,to support skb vlan tagged with multiple
- * headers. (For example: ETH_P_8021Q and ETH_P_8021AD).
- */
- int eff_mtu = dev->mtu + ETH_HLEN + (2 * VLAN_HLEN);
+ int eff_mtu = MLX4_EN_EFF_MTU(dev->mtu);
+ int order = MLX4_EN_ALLOC_PREFER_ORDER;
+ u32 align = SMP_CACHE_BYTES;
int buf_size = 0;
int i = 0;
+ /* bpf requires buffers to be set up as 1 packet per page.
+ * This only works when num_frags == 1.
+ */
+ if (priv->xdp_ring_num) {
+ dma_dir = PCI_DMA_BIDIRECTIONAL;
+ /* This will gain efficient xdp frame recycling at the expense
+ * of more costly truesize accounting
+ */
+ align = PAGE_SIZE;
+ order = 0;
+ }
+
while (buf_size < eff_mtu) {
+ priv->frag_info[i].order = order;
priv->frag_info[i].frag_size =
(eff_mtu > buf_size + frag_sizes[i]) ?
frag_sizes[i] : eff_mtu - buf_size;
priv->frag_info[i].frag_prefix_size = buf_size;
priv->frag_info[i].frag_stride =
- ALIGN(priv->frag_info[i].frag_size,
- SMP_CACHE_BYTES);
+ ALIGN(priv->frag_info[i].frag_size, align);
+ priv->frag_info[i].dma_dir = dma_dir;
buf_size += priv->frag_info[i].frag_size;
i++;
}
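
To make the sizing concrete: MLX4_EN_EFF_MTU(1500) is 1500 + ETH_HLEN (14) + 2 * VLAN_HLEN (2 * 4) = 1522, so a standard MTU fits in one fragment. A stand-alone rerun of the loop above (the frag_sizes values here are assumed for illustration, not quoted from the driver):

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))

int main(void)
{
    /* Assumed fragment size table and 64-byte cache lines. */
    const int frag_sizes[] = { 1536, 4096, 4096, 4096 };
    int eff_mtu = 1500 + 14 + 2 * 4;    /* MLX4_EN_EFF_MTU(1500) = 1522 */
    int buf_size = 0, i = 0;

    while (buf_size < eff_mtu) {
        int size = (eff_mtu > buf_size + frag_sizes[i]) ?
                   frag_sizes[i] : eff_mtu - buf_size;

        /* non-XDP stride is cache-aligned; XDP stride is page-aligned */
        printf("frag %d: size %d, stride %d (xdp stride %d)\n",
               i, size, ALIGN(size, 64), ALIGN(size, 4096));
        buf_size += size;
        i++;
    }
    return 0;   /* prints a single 1522-byte fragment for a standard MTU */
}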
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index a386f047c1af..e2509bba3e7c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -41,6 +41,7 @@
#include <linux/vmalloc.h>
#include <linux/tcp.h>
#include <linux/ip.h>
+#include <linux/ipv6.h>
#include <linux/moduleparam.h>
#include "mlx4_en.h"
@@ -93,20 +94,13 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
/* Allocate HW buffers on provided NUMA node */
set_dev_node(&mdev->dev->persist->pdev->dev, node);
- err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size,
- 2 * PAGE_SIZE);
+ err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
if (err) {
en_err(priv, "Failed allocating hwq resources\n");
goto err_bounce;
}
- err = mlx4_en_map_buffer(&ring->wqres.buf);
- if (err) {
- en_err(priv, "Failed to map TX buffer\n");
- goto err_hwq_res;
- }
-
ring->buf = ring->wqres.buf.direct.buf;
en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d buf_size:%d dma:%llx\n",
@@ -117,7 +111,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
MLX4_RESERVE_ETH_BF_QP);
if (err) {
en_err(priv, "failed reserving qp for TX ring\n");
- goto err_map;
+ goto err_hwq_res;
}
err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL);
@@ -154,8 +148,6 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
err_reserve:
mlx4_qp_release_range(mdev->dev, ring->qpn, 1);
-err_map:
- mlx4_en_unmap_buffer(&ring->wqres.buf);
err_hwq_res:
mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
err_bounce:
@@ -182,7 +174,6 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
mlx4_qp_remove(mdev->dev, &ring->qp);
mlx4_qp_free(mdev->dev, &ring->qp);
mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1);
- mlx4_en_unmap_buffer(&ring->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
kfree(ring->bounce_buf);
ring->bounce_buf = NULL;
@@ -205,6 +196,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
ring->last_nr_txbb = 1;
memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info));
memset(ring->buf, 0, ring->buf_size);
+ ring->free_tx_desc = mlx4_en_free_tx_desc;
ring->qp_state = MLX4_QP_STATE_RST;
ring->doorbell_qpn = cpu_to_be32(ring->qp.qpn << 8);
@@ -274,10 +266,10 @@ static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
}
-static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
- struct mlx4_en_tx_ring *ring,
- int index, u8 owner, u64 timestamp,
- int napi_mode)
+u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring,
+ int index, u8 owner, u64 timestamp,
+ int napi_mode)
{
struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
@@ -353,6 +345,27 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
return tx_info->nr_txbb;
}
+u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring,
+ int index, u8 owner, u64 timestamp,
+ int napi_mode)
+{
+ struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
+ struct mlx4_en_rx_alloc frame = {
+ .page = tx_info->page,
+ .dma = tx_info->map0_dma,
+ .page_offset = 0,
+ .page_size = PAGE_SIZE,
+ };
+
+ if (!mlx4_en_rx_recycle(ring->recycle_ring, &frame)) {
+ dma_unmap_page(priv->ddev, tx_info->map0_dma,
+ PAGE_SIZE, priv->frag_info[0].dma_dir);
+ put_page(tx_info->page);
+ }
+
+ return tx_info->nr_txbb;
+}
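
Completion handling is now dispatched per ring: conventional rings keep mlx4_en_free_tx_desc() (unmap and free the skb), while XDP TX rings install mlx4_en_recycle_tx_desc() so completed pages flow back into the paired RX ring's cache. The shape of that dispatch, reduced to a sketch with illustrative types:

/* Per-ring completion dispatch (sketch): the hot completion loop calls
 * through ring->free_tx_desc and never branches on the ring type itself.
 */
typedef unsigned int (*free_tx_fn)(void *priv, void *ring, int index);

struct tx_ring_model {
    free_tx_fn free_tx_desc;    /* free-skb variant or page-recycle variant */
};

static void activate(struct tx_ring_model *r, int is_xdp,
                     free_tx_fn free_skb, free_tx_fn recycle_page)
{
    r->free_tx_desc = is_xdp ? recycle_page : free_skb;
}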
int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
{
@@ -371,7 +384,7 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
}
while (ring->cons != ring->prod) {
- ring->last_nr_txbb = mlx4_en_free_tx_desc(priv, ring,
+ ring->last_nr_txbb = ring->free_tx_desc(priv, ring,
ring->cons & ring->size_mask,
!!(ring->cons & ring->size), 0,
0 /* Non-NAPI caller */);
@@ -453,7 +466,7 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
timestamp = mlx4_en_get_cqe_ts(cqe);
/* free next descriptor */
- last_nr_txbb = mlx4_en_free_tx_desc(
+ last_nr_txbb = ring->free_tx_desc(
priv, ring, ring_index,
!!((ring_cons + txbbs_skipped) &
ring->size), timestamp, napi_budget);
@@ -485,6 +498,9 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb;
ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped;
+ if (ring->free_tx_desc == mlx4_en_recycle_tx_desc)
+ return done < budget;
+
netdev_tx_completed_queue(ring->tx_queue, packets, bytes);
/* Wakeup Tx queue if this stopped, and ring is not full.
@@ -640,8 +656,7 @@ static int get_real_size(const struct sk_buff *skb,
static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc,
const struct sk_buff *skb,
const struct skb_shared_info *shinfo,
- int real_size, u16 *vlan_tag,
- int tx_ind, void *fragptr)
+ void *fragptr)
{
struct mlx4_wqe_inline_seg *inl = &tx_desc->inl;
int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;
@@ -709,10 +724,66 @@ static void mlx4_bf_copy(void __iomem *dst, const void *src,
__iowrite64_copy(dst, src, bytecnt / 8);
}
+void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring)
+{
+ wmb();
+ /* Since there is no iowrite*_native() that writes the
+ * value as is, without byteswapping - using the one
+ * that doesn't do byteswapping in the relevant arch
+ * endianness.
+ */
+#if defined(__LITTLE_ENDIAN)
+ iowrite32(
+#else
+ iowrite32be(
+#endif
+ ring->doorbell_qpn,
+ ring->bf.uar->map + MLX4_SEND_DOORBELL);
+}
+
+static void mlx4_en_tx_write_desc(struct mlx4_en_tx_ring *ring,
+ struct mlx4_en_tx_desc *tx_desc,
+ union mlx4_wqe_qpn_vlan qpn_vlan,
+ int desc_size, int bf_index,
+ __be32 op_own, bool bf_ok,
+ bool send_doorbell)
+{
+ tx_desc->ctrl.qpn_vlan = qpn_vlan;
+
+ if (bf_ok) {
+ op_own |= htonl((bf_index & 0xffff) << 8);
+ /* Ensure new descriptor hits memory
+ * before setting ownership of this descriptor to HW
+ */
+ dma_wmb();
+ tx_desc->ctrl.owner_opcode = op_own;
+
+ wmb();
+
+ mlx4_bf_copy(ring->bf.reg + ring->bf.offset, &tx_desc->ctrl,
+ desc_size);
+
+ wmb();
+
+ ring->bf.offset ^= ring->bf.buf_size;
+ } else {
+ /* Ensure new descriptor hits memory
+ * before setting ownership of this descriptor to HW
+ */
+ dma_wmb();
+ tx_desc->ctrl.owner_opcode = op_own;
+ if (send_doorbell)
+ mlx4_en_xmit_doorbell(ring);
+ else
+ ring->xmit_more++;
+ }
+}
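
Both branches of mlx4_en_tx_write_desc() obey the same two-step publish protocol: the descriptor body must be visible before the ownership bit is handed to hardware (dma_wmb()), and the ownership write must be visible before the doorbell MMIO or BlueFlame copy (wmb()). A user-space model of that ordering with C11 fences as stand-ins (illustrative only; these are not the kernel primitives):

#include <stdatomic.h>
#include <stdint.h>

struct desc_model {
    uint32_t body[15];
    _Atomic uint32_t owner_opcode;
};

static void publish(struct desc_model *d, uint32_t op_own)
{
    /* ... fill d->body here ... */
    atomic_thread_fence(memory_order_release);      /* ~ dma_wmb() */
    atomic_store_explicit(&d->owner_opcode, op_own,
                          memory_order_relaxed);    /* HW may now own it */
    atomic_thread_fence(memory_order_seq_cst);      /* ~ wmb() before MMIO */
    /* ... ring the doorbell or do the BlueFlame copy here ... */
}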
+
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
struct mlx4_en_priv *priv = netdev_priv(dev);
+ union mlx4_wqe_qpn_vlan qpn_vlan = {};
struct device *ddev = priv->ddev;
struct mlx4_en_tx_ring *ring;
struct mlx4_en_tx_desc *tx_desc;
@@ -724,7 +795,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
int real_size;
u32 index, bf_index;
__be32 op_own;
- u16 vlan_tag = 0;
u16 vlan_proto = 0;
int i_frag;
int lso_header_size;
@@ -734,20 +804,21 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
bool stop_queue;
bool inline_ok;
u32 ring_cons;
-
- if (!priv->port_up)
- goto tx_drop;
+ bool bf_ok;
tx_ind = skb_get_queue_mapping(skb);
ring = priv->tx_ring[tx_ind];
+ if (!priv->port_up)
+ goto tx_drop;
+
/* fetch ring->cons far ahead before needing it to avoid stall */
ring_cons = ACCESS_ONCE(ring->cons);
real_size = get_real_size(skb, shinfo, dev, &lso_header_size,
&inline_ok, &fragptr);
if (unlikely(!real_size))
- goto tx_drop;
+ goto tx_drop_count;
/* Align descriptor to TXBB size */
desc_size = ALIGN(real_size, TXBB_SIZE);
@@ -755,12 +826,20 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
if (netif_msg_tx_err(priv))
en_warn(priv, "Oversized header or SG list\n");
- goto tx_drop;
+ goto tx_drop_count;
}
+ bf_ok = ring->bf_enabled;
if (skb_vlan_tag_present(skb)) {
- vlan_tag = skb_vlan_tag_get(skb);
+ qpn_vlan.vlan_tag = cpu_to_be16(skb_vlan_tag_get(skb));
vlan_proto = be16_to_cpu(skb->vlan_proto);
+ if (vlan_proto == ETH_P_8021AD)
+ qpn_vlan.ins_vlan = MLX4_WQE_CTRL_INS_SVLAN;
+ else if (vlan_proto == ETH_P_8021Q)
+ qpn_vlan.ins_vlan = MLX4_WQE_CTRL_INS_CVLAN;
+ else
+ qpn_vlan.ins_vlan = 0;
+ bf_ok = false;
}
netdev_txq_bql_enqueue_prefetchw(ring->tx_queue);
@@ -780,6 +859,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
else {
tx_desc = (struct mlx4_en_tx_desc *) ring->bounce_buf;
bounce = true;
+ bf_ok = false;
}
/* Save skb in tx_info ring */
@@ -916,12 +996,21 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
if (tx_info->inl)
- build_inline_wqe(tx_desc, skb, shinfo, real_size, &vlan_tag,
- tx_ind, fragptr);
+ build_inline_wqe(tx_desc, skb, shinfo, fragptr);
if (skb->encapsulation) {
- struct iphdr *ipv4 = (struct iphdr *)skb_inner_network_header(skb);
- if (ipv4->protocol == IPPROTO_TCP || ipv4->protocol == IPPROTO_UDP)
+ union {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+ } ip;
+ u8 proto;
+
+ ip.hdr = skb_inner_network_header(skb);
+ proto = (ip.v4->version == 4) ? ip.v4->protocol :
+ ip.v6->nexthdr;
+
+ if (proto == IPPROTO_TCP || proto == IPPROTO_UDP)
op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP | MLX4_WQE_CTRL_ILP);
else
op_own |= cpu_to_be32(MLX4_WQE_CTRL_IIP);
@@ -945,60 +1034,15 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
real_size = (real_size / 16) & 0x3f;
- if (ring->bf_enabled && desc_size <= MAX_BF && !bounce &&
- !skb_vlan_tag_present(skb) && send_doorbell) {
- tx_desc->ctrl.bf_qpn = ring->doorbell_qpn |
- cpu_to_be32(real_size);
-
- op_own |= htonl((bf_index & 0xffff) << 8);
- /* Ensure new descriptor hits memory
- * before setting ownership of this descriptor to HW
- */
- dma_wmb();
- tx_desc->ctrl.owner_opcode = op_own;
-
- wmb();
-
- mlx4_bf_copy(ring->bf.reg + ring->bf.offset, &tx_desc->ctrl,
- desc_size);
+ bf_ok &= desc_size <= MAX_BF && send_doorbell;
- wmb();
-
- ring->bf.offset ^= ring->bf.buf_size;
- } else {
- tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
- if (vlan_proto == ETH_P_8021AD)
- tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_SVLAN;
- else if (vlan_proto == ETH_P_8021Q)
- tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_CVLAN;
- else
- tx_desc->ctrl.ins_vlan = 0;
+ if (bf_ok)
+ qpn_vlan.bf_qpn = ring->doorbell_qpn | cpu_to_be32(real_size);
+ else
+ qpn_vlan.fence_size = real_size;
- tx_desc->ctrl.fence_size = real_size;
-
- /* Ensure new descriptor hits memory
- * before setting ownership of this descriptor to HW
- */
- dma_wmb();
- tx_desc->ctrl.owner_opcode = op_own;
- if (send_doorbell) {
- wmb();
- /* Since there is no iowrite*_native() that writes the
- * value as is, without byteswapping - using the one
- * the doesn't do byteswapping in the relevant arch
- * endianness.
- */
-#if defined(__LITTLE_ENDIAN)
- iowrite32(
-#else
- iowrite32be(
-#endif
- ring->doorbell_qpn,
- ring->bf.uar->map + MLX4_SEND_DOORBELL);
- } else {
- ring->xmit_more++;
- }
- }
+ mlx4_en_tx_write_desc(ring, tx_desc, qpn_vlan, desc_size, bf_index,
+ op_own, bf_ok, send_doorbell);
if (unlikely(stop_queue)) {
/* If queue was emptied after the if (stop_queue) , and before
@@ -1027,9 +1071,114 @@ tx_drop_unmap:
PCI_DMA_TODEVICE);
}
+tx_drop_count:
+ ring->tx_dropped++;
tx_drop:
dev_kfree_skb_any(skb);
- priv->stats.tx_dropped++;
return NETDEV_TX_OK;
}
+netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame,
+ struct net_device *dev, unsigned int length,
+ int tx_ind, int *doorbell_pending)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ union mlx4_wqe_qpn_vlan qpn_vlan = {};
+ struct mlx4_en_tx_ring *ring;
+ struct mlx4_en_tx_desc *tx_desc;
+ struct mlx4_wqe_data_seg *data;
+ struct mlx4_en_tx_info *tx_info;
+ int index, bf_index;
+ bool send_doorbell;
+ int nr_txbb = 1;
+ bool stop_queue;
+ dma_addr_t dma;
+ int real_size;
+ __be32 op_own;
+ u32 ring_cons;
+ bool bf_ok;
+
+ BUILD_BUG_ON_MSG(ALIGN(CTRL_SIZE + DS_SIZE, TXBB_SIZE) != TXBB_SIZE,
+ "mlx4_en_xmit_frame requires minimum size tx desc");
+
+ ring = priv->tx_ring[tx_ind];
+
+ if (!priv->port_up)
+ goto tx_drop;
+
+ if (mlx4_en_is_tx_ring_full(ring))
+ goto tx_drop_count;
+
+ /* fetch ring->cons far ahead before needing it to avoid stall */
+ ring_cons = READ_ONCE(ring->cons);
+
+ index = ring->prod & ring->size_mask;
+ tx_info = &ring->tx_info[index];
+
+ bf_ok = ring->bf_enabled;
+
+ /* Track current inflight packets for performance analysis */
+ AVG_PERF_COUNTER(priv->pstats.inflight_avg,
+ (u32)(ring->prod - ring_cons - 1));
+
+ bf_index = ring->prod;
+ tx_desc = ring->buf + index * TXBB_SIZE;
+ data = &tx_desc->data;
+
+ dma = frame->dma;
+
+ tx_info->page = frame->page;
+ frame->page = NULL;
+ tx_info->map0_dma = dma;
+ tx_info->map0_byte_count = length;
+ tx_info->nr_txbb = nr_txbb;
+ tx_info->nr_bytes = max_t(unsigned int, length, ETH_ZLEN);
+ tx_info->data_offset = (void *)data - (void *)tx_desc;
+ tx_info->ts_requested = 0;
+ tx_info->nr_maps = 1;
+ tx_info->linear = 1;
+ tx_info->inl = 0;
+
+ dma_sync_single_for_device(priv->ddev, dma, length, PCI_DMA_TODEVICE);
+
+ data->addr = cpu_to_be64(dma);
+ data->lkey = ring->mr_key;
+ dma_wmb();
+ data->byte_count = cpu_to_be32(length);
+
+ /* tx completion can avoid cache line miss for common cases */
+ tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
+
+ op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
+ ((ring->prod & ring->size) ?
+ cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
+
+ ring->packets++;
+ ring->bytes += tx_info->nr_bytes;
+ AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, length);
+
+ ring->prod += nr_txbb;
+
+ stop_queue = mlx4_en_is_tx_ring_full(ring);
+ send_doorbell = stop_queue ||
+ *doorbell_pending > MLX4_EN_DOORBELL_BUDGET;
+ bf_ok &= send_doorbell;
+
+ real_size = ((CTRL_SIZE + nr_txbb * DS_SIZE) / 16) & 0x3f;
+
+ if (bf_ok)
+ qpn_vlan.bf_qpn = ring->doorbell_qpn | cpu_to_be32(real_size);
+ else
+ qpn_vlan.fence_size = real_size;
+
+ mlx4_en_tx_write_desc(ring, tx_desc, qpn_vlan, TXBB_SIZE, bf_index,
+ op_own, bf_ok, send_doorbell);
+ *doorbell_pending = send_doorbell ? 0 : *doorbell_pending + 1;
+
+ return NETDEV_TX_OK;
+
+tx_drop_count:
+ ring->tx_dropped++;
+tx_drop:
+ return NETDEV_TX_BUSY;
+}
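
mlx4_en_xmit_frame() deliberately defers the doorbell: the RX poll threads one doorbell_pending counter through every XDP_TX transmission and flushes once per MLX4_EN_DOORBELL_BUDGET frames, or at the end of the poll as in the out: label of mlx4_en_process_rx_cq(). The calling shape, as a compilable sketch:

/* Doorbell batching (sketch): one MMIO write per poll in the common case
 * instead of one per forwarded frame.
 */
#include <stdio.h>

#define DOORBELL_BUDGET 8          /* MLX4_EN_DOORBELL_BUDGET */

static void xmit_frame(int *doorbell_pending)
{
    int send_doorbell = *doorbell_pending > DOORBELL_BUDGET;
    /* (the driver also forces a doorbell when the ring fills up) */

    /* ... post the descriptor, ringing immediately only if over budget ... */
    *doorbell_pending = send_doorbell ? 0 : *doorbell_pending + 1;
}

static void rx_poll(int nframes)
{
    int doorbell_pending = 0;

    while (nframes--)
        xmit_frame(&doorbell_pending);
    if (doorbell_pending)
        printf("flush: mlx4_en_xmit_doorbell(ring)\n");
}

int main(void) { rx_poll(64); return 0; }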
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index f613977455e0..cf8f8a72a801 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -1305,8 +1305,8 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
return 0;
err_out_unmap:
- while (i >= 0)
- mlx4_free_eq(dev, &priv->eq_table.eq[i--]);
+ while (i > 0)
+ mlx4_free_eq(dev, &priv->eq_table.eq[--i]);
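
This is the classic error-unwind off-by-one: on failure, i indexes the EQ that was never created, so the old `while (i >= 0) ... eq[i--]` freed one entry too many, starting with the uninitialized one. Decrementing before the access frees exactly eq[0..i-1]:

/* Unwind off-by-one (sketch): eq[3] failed to initialize, so only
 * eq[0..2] may be freed.
 */
#include <stdio.h>

int main(void)
{
    int i = 3;                          /* index of the failed allocation */

    while (i > 0)                       /* old buggy form: while (i >= 0) */
        printf("free eq[%d]\n", --i);   /* frees 2, 1, 0 - correct */
    return 0;
}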
#ifdef CONFIG_RFS_ACCEL
for (i = 1; i <= dev->caps.num_ports; i++) {
if (mlx4_priv(dev)->port[i].rmap) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index e97094598b2d..d728704d0c7b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -721,6 +721,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
#define QUERY_DEV_CAP_RSVD_LKEY_OFFSET 0x98
#define QUERY_DEV_CAP_MAX_ICM_SZ_OFFSET 0xa0
#define QUERY_DEV_CAP_ETH_BACKPL_OFFSET 0x9c
+#define QUERY_DEV_CAP_DIAG_RPRT_PER_PORT 0x9c
#define QUERY_DEV_CAP_FW_REASSIGN_MAC 0x9d
#define QUERY_DEV_CAP_VXLAN 0x9e
#define QUERY_DEV_CAP_MAD_DEMUX_OFFSET 0xb0
@@ -935,6 +936,9 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_ETH_BACKPL_AN_REP;
if (field32 & (1 << 7))
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_RECOVERABLE_ERROR_EVENT;
+ MLX4_GET(field32, outbox, QUERY_DEV_CAP_DIAG_RPRT_PER_PORT);
+ if (field32 & (1 << 17))
+ dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT;
MLX4_GET(field, outbox, QUERY_DEV_CAP_FW_REASSIGN_MAC);
if (field & 1<<6)
dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_REASSIGN_MAC_EN;
@@ -1128,6 +1132,7 @@ int mlx4_QUERY_PORT(struct mlx4_dev *dev, int port, struct mlx4_port_cap *port_c
port_cap->max_pkeys = 1 << (field & 0xf);
MLX4_GET(field, outbox, QUERY_PORT_MAX_VL_OFFSET);
port_cap->max_vl = field & 0xf;
+ port_cap->max_tc_eth = field >> 4;
MLX4_GET(field, outbox, QUERY_PORT_MAX_MACVLAN_OFFSET);
port_cap->log_max_macs = field & 0xf;
port_cap->log_max_vlans = field >> 4;
@@ -2456,6 +2461,42 @@ int mlx4_NOP(struct mlx4_dev *dev)
MLX4_CMD_NATIVE);
}
+int mlx4_query_diag_counters(struct mlx4_dev *dev, u8 op_modifier,
+ const u32 offset[],
+ u32 value[], size_t array_len, u8 port)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 *outbox;
+ size_t i;
+ int ret;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ outbox = mailbox->buf;
+
+ ret = mlx4_cmd_box(dev, 0, mailbox->dma, port, op_modifier,
+ MLX4_CMD_DIAG_RPRT, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (ret)
+ goto out;
+
+ for (i = 0; i < array_len; i++) {
+ if (offset[i] > MLX4_MAILBOX_SIZE) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ MLX4_GET(value[i], outbox, offset[i]);
+ }
+
+out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return ret;
+}
+EXPORT_SYMBOL(mlx4_query_diag_counters);
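
The new export takes parallel offset/value arrays and performs a single mailbox round trip; mlx4_ib's diagnostic counters are the intended consumer. A hypothetical kernel-side caller fragment (the offsets below are placeholders, not real register offsets):

/* Illustrative fragment only; offsets are hypothetical. */
static int read_two_counters(struct mlx4_dev *dev, u8 port)
{
    const u32 offset[] = { 0x10, 0x14 };   /* placeholder mailbox offsets */
    u32 value[ARRAY_SIZE(offset)];

    return mlx4_query_diag_counters(dev, 0 /* op_modifier */,
                                    offset, value,
                                    ARRAY_SIZE(offset), port);
}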
+
int mlx4_get_phys_port_id(struct mlx4_dev *dev)
{
u8 port;
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index 7ea258af636a..cdbd76f10ced 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -53,6 +53,7 @@ struct mlx4_port_cap {
int ib_mtu;
int max_port_width;
int max_vl;
+ int max_tc_eth;
int max_gids;
int max_pkeys;
u64 def_mac;
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index dec77d6f0ac9..0e8b7c44931f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -147,7 +147,7 @@ int mlx4_do_bond(struct mlx4_dev *dev, bool enable)
if (enable) {
dev->flags |= MLX4_FLAG_BONDED;
} else {
- ret = mlx4_virt2phy_port_map(dev, 1, 2);
+ ret = mlx4_virt2phy_port_map(dev, 1, 2);
if (ret) {
mlx4_err(dev, "Fail to reset port map\n");
return ret;
@@ -218,6 +218,9 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_interface *intf;
+ if (!(dev->persist->interface_state & MLX4_INTERFACE_STATE_UP))
+ return;
+
mlx4_stop_catas_poll(dev);
mutex_lock(&intf_mutex);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 12c77a70abdb..7183ac4135d2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -292,6 +292,7 @@ static int _mlx4_dev_port(struct mlx4_dev *dev, int port,
dev->caps.pkey_table_len[port] = port_cap->max_pkeys;
dev->caps.port_width_cap[port] = port_cap->max_port_width;
dev->caps.eth_mtu_cap[port] = port_cap->eth_mtu;
+ dev->caps.max_tc_eth = port_cap->max_tc_eth;
dev->caps.def_mac[port] = port_cap->def_mac;
dev->caps.supported_type[port] = port_cap->supported_port_types;
dev->caps.suggested_type[port] = port_cap->suggested_type;
@@ -2599,7 +2600,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
err = mlx4_init_uar_table(dev);
if (err) {
mlx4_err(dev, "Failed to initialize user access region table, aborting\n");
- return err;
+ return err;
}
err = mlx4_uar_alloc(dev, &priv->driver_uar);
@@ -2969,6 +2970,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
device_remove_file(&info->dev->persist->pdev->dev,
&info->port_attr);
+ devlink_port_unregister(&info->devlink_port);
info->port = -1;
}
@@ -2983,6 +2985,8 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
device_remove_file(&info->dev->persist->pdev->dev,
&info->port_mtu_attr);
+ devlink_port_unregister(&info->devlink_port);
+
#ifdef CONFIG_RFS_ACCEL
free_irq_cpu_rmap(info->rmap);
info->rmap = NULL;
@@ -3222,6 +3226,7 @@ static int mlx4_load_one(struct pci_dev *pdev, int pci_dev_data,
INIT_LIST_HEAD(&priv->pgdir_list);
mutex_init(&priv->pgdir_mutex);
+ spin_lock_init(&priv->cmd.context_lock);
INIT_LIST_HEAD(&priv->bf_list);
mutex_init(&priv->bf_mutex);
@@ -4134,8 +4139,11 @@ static void mlx4_shutdown(struct pci_dev *pdev)
mlx4_info(persist->dev, "mlx4_shutdown was called\n");
mutex_lock(&persist->interface_state_mutex);
- if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
+ if (persist->interface_state & MLX4_INTERFACE_STATE_UP) {
+ /* Notify mlx4 clients that the kernel is being shut down */
+ persist->interface_state |= MLX4_INTERFACE_STATE_SHUTDOWN;
mlx4_unload_one(pdev);
+ }
mutex_unlock(&persist->interface_state_mutex);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 6aa73972d478..94b891c118c1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -618,8 +618,8 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
err = mlx4_READ_ENTRY(dev,
entry->index,
mailbox);
- if (err)
- goto out_mailbox;
+ if (err)
+ goto out_mailbox;
members_count =
be32_to_cpu(mgm->members_count) &
0xffffff;
@@ -657,8 +657,8 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
err = mlx4_WRITE_ENTRY(dev,
entry->index,
mailbox);
- if (err)
- goto out_mailbox;
+ if (err)
+ goto out_mailbox;
}
}
}
@@ -1102,7 +1102,7 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_mgm *mgm;
u32 members_count;
- int index, prev;
+ int index = -1, prev;
int link = 0;
int i;
int err;
@@ -1181,7 +1181,7 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
goto out;
out:
- if (prot == MLX4_PROT_ETH) {
+ if (prot == MLX4_PROT_ETH && index != -1) {
/* manage the steering entry for promisc mode */
if (new_entry)
err = new_steering_entry(dev, port, steer,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 63b1aeae2c03..9099dbd04951 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -132,6 +132,7 @@ enum {
MLX4_EN_NUM_UP)
#define MLX4_EN_DEFAULT_TX_WORK 256
+#define MLX4_EN_DOORBELL_BUDGET 8
/* Target number of packets to coalesce with interrupt moderation */
#define MLX4_EN_RX_COAL_TARGET 44
@@ -164,6 +165,10 @@ enum {
#define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN)
#define MLX4_EN_MIN_MTU 46
+/* VLAN_HLEN is added twice, to support skb vlan tagged with multiple
+ * headers. (For example: ETH_P_8021Q and ETH_P_8021AD).
+ */
+#define MLX4_EN_EFF_MTU(mtu) ((mtu) + ETH_HLEN + (2 * VLAN_HLEN))
#define ETH_BCAST 0xffffffffffffULL
#define MLX4_EN_LOOPBACK_RETRIES 5
@@ -215,7 +220,10 @@ enum cq_type {
struct mlx4_en_tx_info {
- struct sk_buff *skb;
+ union {
+ struct sk_buff *skb;
+ struct page *page;
+ };
dma_addr_t map0_dma;
u32 map0_byte_count;
u32 nr_txbb;
@@ -255,6 +263,14 @@ struct mlx4_en_rx_alloc {
u32 page_size;
};
+#define MLX4_EN_CACHE_SIZE (2 * NAPI_POLL_WEIGHT)
+struct mlx4_en_page_cache {
+ u32 index;
+ struct mlx4_en_rx_alloc buf[MLX4_EN_CACHE_SIZE];
+};
+
+struct mlx4_en_priv;
+
struct mlx4_en_tx_ring {
/* cache line used and dirtied in tx completion
* (mlx4_en_free_tx_buf())
@@ -270,6 +286,7 @@ struct mlx4_en_tx_ring {
unsigned long tx_csum;
unsigned long tso_packets;
unsigned long xmit_more;
+ unsigned int tx_dropped;
struct mlx4_bf bf;
unsigned long queue_stopped;
@@ -287,6 +304,11 @@ struct mlx4_en_tx_ring {
__be32 mr_key;
void *buf;
struct mlx4_en_tx_info *tx_info;
+ struct mlx4_en_rx_ring *recycle_ring;
+ u32 (*free_tx_desc)(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring,
+ int index, u8 owner,
+ u64 timestamp, int napi_mode);
u8 *bounce_buf;
struct mlx4_qp_context context;
int qpn;
@@ -318,6 +340,8 @@ struct mlx4_en_rx_ring {
u8 fcs_del;
void *buf;
void *rx_info;
+ struct bpf_prog *xdp_prog;
+ struct mlx4_en_page_cache page_cache;
unsigned long bytes;
unsigned long packets;
unsigned long csum_ok;
@@ -352,12 +376,14 @@ struct mlx4_en_port_profile {
u32 rx_ring_num;
u32 tx_ring_size;
u32 rx_ring_size;
+ u8 num_tx_rings_p_up;
u8 rx_pause;
u8 rx_ppp;
u8 tx_pause;
u8 tx_ppp;
int rss_rings;
int inline_thold;
+ struct hwtstamp_config hwtstamp_config;
};
struct mlx4_en_profile {
@@ -437,7 +463,9 @@ struct mlx4_en_mc_list {
struct mlx4_en_frag_info {
u16 frag_size;
u16 frag_prefix_size;
- u16 frag_stride;
+ u32 frag_stride;
+ enum dma_data_direction dma_dir;
+ int order;
};
#ifdef CONFIG_MLX4_EN_DCB
@@ -447,6 +475,17 @@ struct mlx4_en_frag_info {
#define MLX4_EN_TC_ETS 7
+enum dcb_pfc_type {
+ pfc_disabled = 0,
+ pfc_enabled_full,
+ pfc_enabled_tx,
+ pfc_enabled_rx
+};
+
+struct mlx4_en_cee_config {
+ bool pfc_state;
+ enum dcb_pfc_type dcb_pfc[MLX4_EN_NUM_UP];
+};
#endif
struct ethtool_flow_id {
@@ -466,6 +505,9 @@ enum {
MLX4_EN_FLAG_RX_FILTER_NEEDED = (1 << 3),
MLX4_EN_FLAG_FORCE_PROMISC = (1 << 4),
MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP = (1 << 5),
+#ifdef CONFIG_MLX4_EN_DCB
+ MLX4_EN_FLAG_DCB_ENABLED = (1 << 6),
+#endif
};
#define PORT_BEACON_MAX_LIMIT (65535)
@@ -482,8 +524,6 @@ struct mlx4_en_priv {
struct mlx4_en_port_profile *prof;
struct net_device *dev;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
- struct net_device_stats stats;
- struct net_device_stats ret_stats;
struct mlx4_en_port_state port_state;
spinlock_t stats_lock;
struct ethtool_flow_id ethtool_rules[MAX_NUM_OF_FS_RULES];
@@ -535,6 +575,7 @@ struct mlx4_en_priv {
struct mlx4_en_frag_info frag_info[MLX4_EN_MAX_RX_FRAGS];
u16 num_frags;
u16 log_rx_info;
+ int xdp_ring_num;
struct mlx4_en_tx_ring **tx_ring;
struct mlx4_en_rx_ring *rx_ring[MAX_RX_RINGS];
@@ -546,10 +587,8 @@ struct mlx4_en_priv {
struct work_struct linkstate_task;
struct delayed_work stats_task;
struct delayed_work service_task;
-#ifdef CONFIG_MLX4_EN_VXLAN
struct work_struct vxlan_add_task;
struct work_struct vxlan_del_task;
-#endif
struct mlx4_en_perf_stats pstats;
struct mlx4_en_pkt_stats pkstats;
struct mlx4_en_counter_stats pf_stats;
@@ -571,9 +610,12 @@ struct mlx4_en_priv {
u32 counter_index;
#ifdef CONFIG_MLX4_EN_DCB
+#define MLX4_EN_DCB_ENABLED 0x3
struct ieee_ets ets;
u16 maxrate[IEEE_8021QAZ_MAX_TCS];
enum dcbnl_cndd_states cndd_state[IEEE_8021QAZ_MAX_TCS];
+ struct mlx4_en_cee_config cee_config;
+ u8 dcbx_cap;
#endif
#ifdef CONFIG_RFS_ACCEL
spinlock_t filters_lock;
@@ -624,8 +666,11 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
u8 rx_ppp, u8 rx_pause,
u8 tx_ppp, u8 tx_pause);
-void mlx4_en_free_resources(struct mlx4_en_priv *priv);
-int mlx4_en_alloc_resources(struct mlx4_en_priv *priv);
+int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
+ struct mlx4_en_priv *tmp,
+ struct mlx4_en_port_profile *prof);
+void mlx4_en_safe_replace_resources(struct mlx4_en_priv *priv,
+ struct mlx4_en_priv *tmp);
int mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq,
int entries, int ring, enum cq_type mode, int node);
@@ -640,6 +685,12 @@ void mlx4_en_tx_irq(struct mlx4_cq *mcq);
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback);
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
+netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame,
+ struct net_device *dev, unsigned int length,
+ int tx_ind, int *doorbell_pending);
+void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring);
+bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
+ struct mlx4_en_rx_alloc *frame);
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring **pring,
@@ -668,12 +719,18 @@ int mlx4_en_process_rx_cq(struct net_device *dev,
int budget);
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget);
int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget);
+u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring,
+ int index, u8 owner, u64 timestamp,
+ int napi_mode);
+u32 mlx4_en_recycle_tx_desc(struct mlx4_en_priv *priv,
+ struct mlx4_en_tx_ring *ring,
+ int index, u8 owner, u64 timestamp,
+ int napi_mode);
void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
int is_tx, int rss, int qpn, int cqn, int user_prio,
struct mlx4_qp_context *context);
void mlx4_en_sqp_event(struct mlx4_qp *qp, enum mlx4_event event);
-int mlx4_en_map_buffer(struct mlx4_buf *buf);
-void mlx4_en_unmap_buffer(struct mlx4_buf *buf);
int mlx4_en_change_mcast_lb(struct mlx4_en_priv *priv, struct mlx4_qp *qp,
int loopback);
void mlx4_en_calc_rx_buf(struct net_device *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 93195191f45b..395b5463cfd9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -248,7 +248,7 @@ static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
offset, order);
return;
}
- __mlx4_free_mtt_range(dev, offset, order);
+ __mlx4_free_mtt_range(dev, offset, order);
}
void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index b3cc3ab63799..6fc156a3918d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -205,7 +205,9 @@ int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node)
goto free_uar;
}
- uar->bf_map = io_mapping_map_wc(priv->bf_mapping, uar->index << PAGE_SHIFT);
+ uar->bf_map = io_mapping_map_wc(priv->bf_mapping,
+ uar->index << PAGE_SHIFT,
+ PAGE_SIZE);
if (!uar->bf_map) {
err = -ENOMEM;
goto unamp_uar;
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 087b23b320cb..c5b2064297a1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -52,6 +52,7 @@
#define MLX4_FLAG_V_IGNORE_FCS_MASK 0x2
#define MLX4_IGNORE_FCS_MASK 0x1
+#define MLX4_TC_MAX_NUMBER 8
void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
{
@@ -2015,3 +2016,14 @@ out:
return ret;
}
EXPORT_SYMBOL(mlx4_get_module_info);
+
+int mlx4_max_tc(struct mlx4_dev *dev)
+{
+ u8 num_tc = dev->caps.max_tc_eth;
+
+ if (!num_tc)
+ num_tc = MLX4_TC_MAX_NUMBER;
+
+ return num_tc;
+}
+EXPORT_SYMBOL(mlx4_max_tc);
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index cd9b2b28df88..8b81114bdc72 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2372,16 +2372,15 @@ static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
__mlx4_mpt_release(dev, index);
break;
case RES_OP_MAP_ICM:
- index = get_param_l(&in_param);
- id = index & mpt_mask(dev);
- err = mr_res_start_move_to(dev, slave, id,
- RES_MPT_RESERVED, &mpt);
- if (err)
- return err;
-
- __mlx4_mpt_free_icm(dev, mpt->key);
- res_end_move(dev, slave, RES_MPT, id);
+ index = get_param_l(&in_param);
+ id = index & mpt_mask(dev);
+ err = mr_res_start_move_to(dev, slave, id,
+ RES_MPT_RESERVED, &mpt);
+ if (err)
return err;
+
+ __mlx4_mpt_free_icm(dev, mpt->key);
+ res_end_move(dev, slave, RES_MPT, id);
break;
default:
err = -EINVAL;
@@ -4253,9 +4252,8 @@ int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)) &&
!(dev->caps.flags2 &
MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
- mlx4_warn(dev,
- "Src check LB for slave %d isn't supported\n",
- slave);
+ mlx4_warn(dev, "Src check LB for slave %d isn't supported\n",
+ slave);
return -ENOTSUPP;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index f5c3b9465d8d..aae46884bf93 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -4,6 +4,7 @@
config MLX5_CORE
tristate "Mellanox Technologies ConnectX-4 and Connect-IB core driver"
+ depends on MAY_USE_DEVLINK
depends on PCI
default n
---help---
@@ -31,10 +32,3 @@ config MLX5_CORE_EN_DCB
This flag depends on the kernel's DCB support.
If unsure, set to Y
-
-config MLX5_CORE_EN_VXLAN
- bool "VXLAN offloads Support"
- default y
- depends on MLX5_CORE_EN && VXLAN && !(MLX5_CORE=y && VXLAN=m)
- ---help---
- Say Y here if you want to use VXLAN offloads in the driver.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index bf65b71c7360..05cc1effc13c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -1,12 +1,13 @@
obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
- health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
- mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o
+ health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
+ mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
+ fs_counters.o rl.o
-mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o \
- en_main.o en_fs.o en_ethtool.o en_tx.o en_rx.o \
- en_txrx.o en_clock.o en_tc.o
+mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o eswitch_offloads.o \
+ en_main.o en_common.o en_fs.o en_ethtool.o en_tx.o \
+ en_rx.o en_rx_am.o en_txrx.o en_clock.o vxlan.o \
+ en_tc.o en_arfs.o en_rep.o en_fs_ethtool.o
-mlx5_core-$(CONFIG_MLX5_CORE_EN_VXLAN) += vxlan.o
mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index eb926e1ee71c..c2ec01a22d55 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -143,13 +143,14 @@ static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
return cmd->cmd_buf + (idx << cmd->log_stride);
}
-static u8 xor8_buf(void *buf, int len)
+static u8 xor8_buf(void *buf, size_t offset, int len)
{
u8 *ptr = buf;
u8 sum = 0;
int i;
+ int end = len + offset;
- for (i = 0; i < len; i++)
+ for (i = offset; i < end; i++)
sum ^= ptr[i];
return sum;
@@ -157,41 +158,49 @@ static u8 xor8_buf(void *buf, int len)
static int verify_block_sig(struct mlx5_cmd_prot_block *block)
{
- if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
+ size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
+ int xor_len = sizeof(*block) - sizeof(block->data) - 1;
+
+ if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
return -EINVAL;
- if (xor8_buf(block, sizeof(*block)) != 0xff)
+ if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
return -EINVAL;
return 0;
}
-static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
- int csum)
+static void calc_block_sig(struct mlx5_cmd_prot_block *block)
{
- block->token = token;
- if (csum) {
- block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
- sizeof(block->data) - 2);
- block->sig = ~xor8_buf(block, sizeof(*block) - 1);
- }
+ int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2;
+ size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
+
+ block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len);
+ block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1);
}
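
The signature scheme is self-checking by construction: calc_block_sig() stores the complement of the xor over the protected bytes, so re-xoring the whole block (signature included) must yield 0xff, which is exactly what verify_block_sig() tests. A stand-alone demonstration of the invariant:

#include <stdio.h>
#include <stdint.h>

/* Same shape as the patched xor8_buf(buf, offset, len). */
static uint8_t xor8(const uint8_t *p, size_t off, size_t len)
{
    uint8_t sum = 0;
    size_t i;

    for (i = off; i < off + len; i++)
        sum ^= p[i];
    return sum;
}

int main(void)
{
    uint8_t block[16] = "mailbox payload";

    block[15] = ~xor8(block, 0, 15);             /* calc_block_sig() step */
    printf("verify: %#x\n", xor8(block, 0, 16)); /* prints 0xff */
    return 0;
}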
-static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
+static void calc_chain_sig(struct mlx5_cmd_msg *msg)
{
struct mlx5_cmd_mailbox *next = msg->next;
-
- while (next) {
- calc_block_sig(next->buf, token, csum);
+ int size = msg->len;
+ int blen = size - min_t(int, sizeof(msg->first.data), size);
+ int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
+ / MLX5_CMD_DATA_BLOCK_SIZE;
+ int i = 0;
+
+ for (i = 0; i < n && next; i++) {
+ calc_block_sig(next->buf);
next = next->next;
}
}
static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
- ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
- calc_chain_sig(ent->in, ent->token, csum);
- calc_chain_sig(ent->out, ent->token, csum);
+ ent->lay->sig = ~xor8_buf(ent->lay, 0, sizeof(*ent->lay));
+ if (csum) {
+ calc_chain_sig(ent->in);
+ calc_chain_sig(ent->out);
+ }
}
static void poll_timeout(struct mlx5_cmd_work_ent *ent)
@@ -222,12 +231,17 @@ static int verify_signature(struct mlx5_cmd_work_ent *ent)
struct mlx5_cmd_mailbox *next = ent->out->next;
int err;
u8 sig;
+ int size = ent->out->len;
+ int blen = size - min_t(int, sizeof(ent->out->first.data), size);
+ int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
+ / MLX5_CMD_DATA_BLOCK_SIZE;
+ int i = 0;
- sig = xor8_buf(ent->lay, sizeof(*ent->lay));
+ sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
if (sig != 0xff)
return -EINVAL;
- while (next) {
+ for (i = 0; i < n && next; i++) {
err = verify_block_sig(next->buf);
if (err)
return err;
@@ -294,6 +308,13 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
+ case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER:
+ case MLX5_CMD_OP_2ERR_QP:
+ case MLX5_CMD_OP_2RST_QP:
+ case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
+ case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+ case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT:
return MLX5_CMD_STAT_OK;
case MLX5_CMD_OP_QUERY_HCA_CAP:
@@ -320,8 +341,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_RTR2RTS_QP:
case MLX5_CMD_OP_RTS2RTS_QP:
case MLX5_CMD_OP_SQERR2RTS_QP:
- case MLX5_CMD_OP_2ERR_QP:
- case MLX5_CMD_OP_2RST_QP:
case MLX5_CMD_OP_QUERY_QP:
case MLX5_CMD_OP_SQD_RTS_QP:
case MLX5_CMD_OP_INIT2INIT_QP:
@@ -341,7 +360,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
- case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
case MLX5_CMD_OP_SET_ROCE_ADDRESS:
case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
@@ -389,12 +407,15 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_CREATE_RQT:
case MLX5_CMD_OP_MODIFY_RQT:
case MLX5_CMD_OP_QUERY_RQT:
+
case MLX5_CMD_OP_CREATE_FLOW_TABLE:
case MLX5_CMD_OP_QUERY_FLOW_TABLE:
case MLX5_CMD_OP_CREATE_FLOW_GROUP:
case MLX5_CMD_OP_QUERY_FLOW_GROUP:
- case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
+
case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
+ case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
+ case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
*status = MLX5_DRIVER_STATUS_ABORTED;
*synd = MLX5_DRIVER_SYND;
return -EIO;
@@ -406,178 +427,143 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
const char *mlx5_command_str(int command)
{
- switch (command) {
- case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
- return "QUERY_HCA_VPORT_CONTEXT";
-
- case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
- return "MODIFY_HCA_VPORT_CONTEXT";
-
- case MLX5_CMD_OP_QUERY_HCA_CAP:
- return "QUERY_HCA_CAP";
-
- case MLX5_CMD_OP_SET_HCA_CAP:
- return "SET_HCA_CAP";
-
- case MLX5_CMD_OP_QUERY_ADAPTER:
- return "QUERY_ADAPTER";
-
- case MLX5_CMD_OP_INIT_HCA:
- return "INIT_HCA";
-
- case MLX5_CMD_OP_TEARDOWN_HCA:
- return "TEARDOWN_HCA";
-
- case MLX5_CMD_OP_ENABLE_HCA:
- return "MLX5_CMD_OP_ENABLE_HCA";
-
- case MLX5_CMD_OP_DISABLE_HCA:
- return "MLX5_CMD_OP_DISABLE_HCA";
-
- case MLX5_CMD_OP_QUERY_PAGES:
- return "QUERY_PAGES";
-
- case MLX5_CMD_OP_MANAGE_PAGES:
- return "MANAGE_PAGES";
-
- case MLX5_CMD_OP_CREATE_MKEY:
- return "CREATE_MKEY";
-
- case MLX5_CMD_OP_QUERY_MKEY:
- return "QUERY_MKEY";
-
- case MLX5_CMD_OP_DESTROY_MKEY:
- return "DESTROY_MKEY";
-
- case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
- return "QUERY_SPECIAL_CONTEXTS";
-
- case MLX5_CMD_OP_CREATE_EQ:
- return "CREATE_EQ";
-
- case MLX5_CMD_OP_DESTROY_EQ:
- return "DESTROY_EQ";
-
- case MLX5_CMD_OP_QUERY_EQ:
- return "QUERY_EQ";
-
- case MLX5_CMD_OP_CREATE_CQ:
- return "CREATE_CQ";
-
- case MLX5_CMD_OP_DESTROY_CQ:
- return "DESTROY_CQ";
-
- case MLX5_CMD_OP_QUERY_CQ:
- return "QUERY_CQ";
-
- case MLX5_CMD_OP_MODIFY_CQ:
- return "MODIFY_CQ";
-
- case MLX5_CMD_OP_CREATE_QP:
- return "CREATE_QP";
-
- case MLX5_CMD_OP_DESTROY_QP:
- return "DESTROY_QP";
-
- case MLX5_CMD_OP_RST2INIT_QP:
- return "RST2INIT_QP";
-
- case MLX5_CMD_OP_INIT2RTR_QP:
- return "INIT2RTR_QP";
-
- case MLX5_CMD_OP_RTR2RTS_QP:
- return "RTR2RTS_QP";
-
- case MLX5_CMD_OP_RTS2RTS_QP:
- return "RTS2RTS_QP";
-
- case MLX5_CMD_OP_SQERR2RTS_QP:
- return "SQERR2RTS_QP";
-
- case MLX5_CMD_OP_2ERR_QP:
- return "2ERR_QP";
-
- case MLX5_CMD_OP_2RST_QP:
- return "2RST_QP";
-
- case MLX5_CMD_OP_QUERY_QP:
- return "QUERY_QP";
-
- case MLX5_CMD_OP_MAD_IFC:
- return "MAD_IFC";
-
- case MLX5_CMD_OP_INIT2INIT_QP:
- return "INIT2INIT_QP";
-
- case MLX5_CMD_OP_CREATE_PSV:
- return "CREATE_PSV";
-
- case MLX5_CMD_OP_DESTROY_PSV:
- return "DESTROY_PSV";
-
- case MLX5_CMD_OP_CREATE_SRQ:
- return "CREATE_SRQ";
-
- case MLX5_CMD_OP_DESTROY_SRQ:
- return "DESTROY_SRQ";
-
- case MLX5_CMD_OP_QUERY_SRQ:
- return "QUERY_SRQ";
-
- case MLX5_CMD_OP_ARM_RQ:
- return "ARM_RQ";
-
- case MLX5_CMD_OP_CREATE_XRC_SRQ:
- return "CREATE_XRC_SRQ";
-
- case MLX5_CMD_OP_DESTROY_XRC_SRQ:
- return "DESTROY_XRC_SRQ";
-
- case MLX5_CMD_OP_QUERY_XRC_SRQ:
- return "QUERY_XRC_SRQ";
-
- case MLX5_CMD_OP_ARM_XRC_SRQ:
- return "ARM_XRC_SRQ";
-
- case MLX5_CMD_OP_ALLOC_PD:
- return "ALLOC_PD";
-
- case MLX5_CMD_OP_DEALLOC_PD:
- return "DEALLOC_PD";
-
- case MLX5_CMD_OP_ALLOC_UAR:
- return "ALLOC_UAR";
-
- case MLX5_CMD_OP_DEALLOC_UAR:
- return "DEALLOC_UAR";
-
- case MLX5_CMD_OP_ATTACH_TO_MCG:
- return "ATTACH_TO_MCG";
-
- case MLX5_CMD_OP_DETTACH_FROM_MCG:
- return "DETTACH_FROM_MCG";
-
- case MLX5_CMD_OP_ALLOC_XRCD:
- return "ALLOC_XRCD";
-
- case MLX5_CMD_OP_DEALLOC_XRCD:
- return "DEALLOC_XRCD";
-
- case MLX5_CMD_OP_ACCESS_REG:
- return "MLX5_CMD_OP_ACCESS_REG";
-
- case MLX5_CMD_OP_SET_WOL_ROL:
- return "SET_WOL_ROL";
-
- case MLX5_CMD_OP_QUERY_WOL_ROL:
- return "QUERY_WOL_ROL";
-
- case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
- return "ADD_VXLAN_UDP_DPORT";
-
- case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
- return "DELETE_VXLAN_UDP_DPORT";
+#define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd
+ switch (command) {
+ MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP);
+ MLX5_COMMAND_STR_CASE(QUERY_ADAPTER);
+ MLX5_COMMAND_STR_CASE(INIT_HCA);
+ MLX5_COMMAND_STR_CASE(TEARDOWN_HCA);
+ MLX5_COMMAND_STR_CASE(ENABLE_HCA);
+ MLX5_COMMAND_STR_CASE(DISABLE_HCA);
+ MLX5_COMMAND_STR_CASE(QUERY_PAGES);
+ MLX5_COMMAND_STR_CASE(MANAGE_PAGES);
+ MLX5_COMMAND_STR_CASE(SET_HCA_CAP);
+ MLX5_COMMAND_STR_CASE(QUERY_ISSI);
+ MLX5_COMMAND_STR_CASE(SET_ISSI);
+ MLX5_COMMAND_STR_CASE(CREATE_MKEY);
+ MLX5_COMMAND_STR_CASE(QUERY_MKEY);
+ MLX5_COMMAND_STR_CASE(DESTROY_MKEY);
+ MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS);
+ MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME);
+ MLX5_COMMAND_STR_CASE(CREATE_EQ);
+ MLX5_COMMAND_STR_CASE(DESTROY_EQ);
+ MLX5_COMMAND_STR_CASE(QUERY_EQ);
+ MLX5_COMMAND_STR_CASE(GEN_EQE);
+ MLX5_COMMAND_STR_CASE(CREATE_CQ);
+ MLX5_COMMAND_STR_CASE(DESTROY_CQ);
+ MLX5_COMMAND_STR_CASE(QUERY_CQ);
+ MLX5_COMMAND_STR_CASE(MODIFY_CQ);
+ MLX5_COMMAND_STR_CASE(CREATE_QP);
+ MLX5_COMMAND_STR_CASE(DESTROY_QP);
+ MLX5_COMMAND_STR_CASE(RST2INIT_QP);
+ MLX5_COMMAND_STR_CASE(INIT2RTR_QP);
+ MLX5_COMMAND_STR_CASE(RTR2RTS_QP);
+ MLX5_COMMAND_STR_CASE(RTS2RTS_QP);
+ MLX5_COMMAND_STR_CASE(SQERR2RTS_QP);
+ MLX5_COMMAND_STR_CASE(2ERR_QP);
+ MLX5_COMMAND_STR_CASE(2RST_QP);
+ MLX5_COMMAND_STR_CASE(QUERY_QP);
+ MLX5_COMMAND_STR_CASE(SQD_RTS_QP);
+ MLX5_COMMAND_STR_CASE(INIT2INIT_QP);
+ MLX5_COMMAND_STR_CASE(CREATE_PSV);
+ MLX5_COMMAND_STR_CASE(DESTROY_PSV);
+ MLX5_COMMAND_STR_CASE(CREATE_SRQ);
+ MLX5_COMMAND_STR_CASE(DESTROY_SRQ);
+ MLX5_COMMAND_STR_CASE(QUERY_SRQ);
+ MLX5_COMMAND_STR_CASE(ARM_RQ);
+ MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ);
+ MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ);
+ MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ);
+ MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ);
+ MLX5_COMMAND_STR_CASE(CREATE_DCT);
+ MLX5_COMMAND_STR_CASE(DESTROY_DCT);
+ MLX5_COMMAND_STR_CASE(DRAIN_DCT);
+ MLX5_COMMAND_STR_CASE(QUERY_DCT);
+ MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION);
+ MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE);
+ MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE);
+ MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT);
+ MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT);
+ MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT);
+ MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT);
+ MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS);
+ MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS);
+ MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT);
+ MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT);
+ MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID);
+ MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY);
+ MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER);
+ MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER);
+ MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER);
+ MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER);
+ MLX5_COMMAND_STR_CASE(ALLOC_PD);
+ MLX5_COMMAND_STR_CASE(DEALLOC_PD);
+ MLX5_COMMAND_STR_CASE(ALLOC_UAR);
+ MLX5_COMMAND_STR_CASE(DEALLOC_UAR);
+ MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION);
+ MLX5_COMMAND_STR_CASE(ACCESS_REG);
+ MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG);
+ MLX5_COMMAND_STR_CASE(DETTACH_FROM_MCG);
+ MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG);
+ MLX5_COMMAND_STR_CASE(MAD_IFC);
+ MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX);
+ MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX);
+ MLX5_COMMAND_STR_CASE(NOP);
+ MLX5_COMMAND_STR_CASE(ALLOC_XRCD);
+ MLX5_COMMAND_STR_CASE(DEALLOC_XRCD);
+ MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN);
+ MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN);
+ MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS);
+ MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS);
+ MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS);
+ MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS);
+ MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS);
+ MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT);
+ MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT);
+ MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY);
+ MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY);
+ MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY);
+ MLX5_COMMAND_STR_CASE(SET_WOL_ROL);
+ MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL);
+ MLX5_COMMAND_STR_CASE(CREATE_TIR);
+ MLX5_COMMAND_STR_CASE(MODIFY_TIR);
+ MLX5_COMMAND_STR_CASE(DESTROY_TIR);
+ MLX5_COMMAND_STR_CASE(QUERY_TIR);
+ MLX5_COMMAND_STR_CASE(CREATE_SQ);
+ MLX5_COMMAND_STR_CASE(MODIFY_SQ);
+ MLX5_COMMAND_STR_CASE(DESTROY_SQ);
+ MLX5_COMMAND_STR_CASE(QUERY_SQ);
+ MLX5_COMMAND_STR_CASE(CREATE_RQ);
+ MLX5_COMMAND_STR_CASE(MODIFY_RQ);
+ MLX5_COMMAND_STR_CASE(DESTROY_RQ);
+ MLX5_COMMAND_STR_CASE(QUERY_RQ);
+ MLX5_COMMAND_STR_CASE(CREATE_RMP);
+ MLX5_COMMAND_STR_CASE(MODIFY_RMP);
+ MLX5_COMMAND_STR_CASE(DESTROY_RMP);
+ MLX5_COMMAND_STR_CASE(QUERY_RMP);
+ MLX5_COMMAND_STR_CASE(CREATE_TIS);
+ MLX5_COMMAND_STR_CASE(MODIFY_TIS);
+ MLX5_COMMAND_STR_CASE(DESTROY_TIS);
+ MLX5_COMMAND_STR_CASE(QUERY_TIS);
+ MLX5_COMMAND_STR_CASE(CREATE_RQT);
+ MLX5_COMMAND_STR_CASE(MODIFY_RQT);
+ MLX5_COMMAND_STR_CASE(DESTROY_RQT);
+ MLX5_COMMAND_STR_CASE(QUERY_RQT);
+ MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ROOT);
+ MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE);
+ MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE);
+ MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE);
+ MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP);
+ MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP);
+ MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP);
+ MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY);
+ MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY);
+ MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY);
+ MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER);
+ MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER);
+ MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER);
+ MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE);
default: return "unknown command opcode";
}
}
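The rewrite above replaces roughly a hundred hand-written case/return pairs with one stringification macro: ## pastes the suffix onto the MLX5_CMD_OP_ prefix to form the enum constant, and # turns the same suffix into the returned string, which also removes the one inconsistent case that returned the fully prefixed "MLX5_CMD_OP_ACCESS_REG". A minimal standalone sketch of the pattern, using a hypothetical two-opcode enum in place of the real command set:

#include <stdio.h>

/* Hypothetical opcodes standing in for MLX5_CMD_OP_* */
enum cmd_op {
	CMD_OP_CREATE_CQ,
	CMD_OP_DESTROY_CQ,
};

/* Paste the suffix onto the enum prefix; return the suffix as a string */
#define CMD_STR_CASE(__cmd) case CMD_OP_ ## __cmd: return #__cmd

static const char *cmd_str(enum cmd_op op)
{
	switch (op) {
	CMD_STR_CASE(CREATE_CQ);
	CMD_STR_CASE(DESTROY_CQ);
	default: return "unknown command opcode";
	}
}

int main(void)
{
	printf("%s\n", cmd_str(CMD_OP_CREATE_CQ)); /* prints CREATE_CQ */
	return 0;
}

Besides being shorter, the macro keeps each constant and its string from drifting apart, which is how the ACCESS_REG case went stale in the old table.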
@@ -634,11 +620,36 @@ static void dump_command(struct mlx5_core_dev *dev,
pr_debug("\n");
}
+static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
+{
+ struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
+
+ return be16_to_cpu(hdr->opcode);
+}
+
+static void cb_timeout_handler(struct work_struct *work)
+{
+ struct delayed_work *dwork = container_of(work, struct delayed_work,
+ work);
+ struct mlx5_cmd_work_ent *ent = container_of(dwork,
+ struct mlx5_cmd_work_ent,
+ cb_timeout_work);
+ struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
+ cmd);
+
+ ent->ret = -ETIMEDOUT;
+ mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
+ mlx5_command_str(msg_to_opcode(ent->in)),
+ msg_to_opcode(ent->in));
+ mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
+}
+
static void cmd_work_handler(struct work_struct *work)
{
struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
struct mlx5_cmd *cmd = ent->cmd;
struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
+ unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
struct mlx5_cmd_layout *lay;
struct semaphore *sem;
unsigned long flags;
@@ -659,7 +670,6 @@ static void cmd_work_handler(struct work_struct *work)
spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}
- ent->token = alloc_token(cmd);
cmd->ent_arr[ent->idx] = ent;
lay = get_inst(cmd, ent->idx);
ent->lay = lay;
@@ -679,6 +689,9 @@ static void cmd_work_handler(struct work_struct *work)
dump_command(dev, ent, 1);
ent->ts1 = ktime_get_ns();
+ if (ent->callback)
+ schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
+
/* ring doorbell after the descriptor is valid */
mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
wmb();
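This hunk arms cb_timeout_work just before the doorbell rings, so an asynchronous command whose completion event never arrives is failed with -ETIMEDOUT by the watchdog (cb_timeout_handler above) instead of holding its slot forever; the completion path cancels the work when the event does arrive. A rough userspace analogue of the arm-then-cancel idea, with a pthread standing in for schedule_delayed_work() and a made-up 2 s delay:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct cmd_ent {
	pthread_mutex_t lock;
	bool done;
	int ret;
};

/* Watchdog armed at submit time; fires only if no completion beat it. */
static void *watchdog(void *arg)
{
	struct cmd_ent *ent = arg;

	sleep(2); /* stands in for the MLX5_CMD_TIMEOUT_MSEC delay */
	pthread_mutex_lock(&ent->lock);
	if (!ent->done) { /* completion never arrived: force the error path */
		ent->ret = -ETIMEDOUT;
		ent->done = true;
		fprintf(stderr, "command timed out, completing with -ETIMEDOUT\n");
	}
	pthread_mutex_unlock(&ent->lock);
	return NULL;
}

int main(void)
{
	struct cmd_ent ent = { .lock = PTHREAD_MUTEX_INITIALIZER };
	pthread_t wd;

	pthread_create(&wd, NULL, watchdog, &ent);
	/* A real completion handler would set ent.done here and cancel
	 * the watchdog; we let it fire to exercise the timeout path. */
	pthread_join(wd, NULL);
	return ent.ret ? 1 : 0;
}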
@@ -723,13 +736,6 @@ static const char *deliv_status_to_str(u8 status)
}
}
-static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
-{
- struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);
-
- return be16_to_cpu(hdr->opcode);
-}
-
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
@@ -738,13 +744,13 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
if (cmd->mode == CMD_MODE_POLLING) {
wait_for_completion(&ent->done);
- err = ent->ret;
- } else {
- if (!wait_for_completion_timeout(&ent->done, timeout))
- err = -ETIMEDOUT;
- else
- err = 0;
+ } else if (!wait_for_completion_timeout(&ent->done, timeout)) {
+ ent->ret = -ETIMEDOUT;
+ mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
}
+
+ err = ent->ret;
+
if (err == -ETIMEDOUT) {
mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
mlx5_command_str(msg_to_opcode(ent->in)),
@@ -773,7 +779,8 @@ static u8 *get_status_ptr(struct mlx5_outbox_hdr *out)
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
struct mlx5_cmd_msg *out, void *uout, int uout_size,
mlx5_cmd_cbk_t callback,
- void *context, int page_queue, u8 *status)
+ void *context, int page_queue, u8 *status,
+ u8 token)
{
struct mlx5_cmd *cmd = &dev->cmd;
struct mlx5_cmd_work_ent *ent;
@@ -790,9 +797,12 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
if (IS_ERR(ent))
return PTR_ERR(ent);
+ ent->token = token;
+
if (!callback)
init_completion(&ent->done);
+ INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
INIT_WORK(&ent->work, cmd_work_handler);
if (page_queue) {
cmd_work_handler(&ent->work);
@@ -802,28 +812,26 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
goto out_free;
}
- if (!callback) {
- err = wait_func(dev, ent);
- if (err == -ETIMEDOUT)
- goto out;
-
- ds = ent->ts2 - ent->ts1;
- op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
- if (op < ARRAY_SIZE(cmd->stats)) {
- stats = &cmd->stats[op];
- spin_lock_irq(&stats->lock);
- stats->sum += ds;
- ++stats->n;
- spin_unlock_irq(&stats->lock);
- }
- mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
- "fw exec time for %s is %lld nsec\n",
- mlx5_command_str(op), ds);
- *status = ent->status;
- free_cmd(ent);
- }
+ if (callback)
+ goto out;
- return err;
+ err = wait_func(dev, ent);
+ if (err == -ETIMEDOUT)
+ goto out_free;
+
+ ds = ent->ts2 - ent->ts1;
+ op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
+ if (op < ARRAY_SIZE(cmd->stats)) {
+ stats = &cmd->stats[op];
+ spin_lock_irq(&stats->lock);
+ stats->sum += ds;
+ ++stats->n;
+ spin_unlock_irq(&stats->lock);
+ }
+ mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
+ "fw exec time for %s is %lld nsec\n",
+ mlx5_command_str(op), ds);
+ *status = ent->status;
out_free:
free_cmd(ent);
@@ -862,7 +870,8 @@ static const struct file_operations fops = {
.write = dbg_write,
};
-static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
+static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size,
+ u8 token)
{
struct mlx5_cmd_prot_block *block;
struct mlx5_cmd_mailbox *next;
@@ -888,6 +897,7 @@ static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
memcpy(block->data, from, copy);
from += copy;
size -= copy;
+ block->token = token;
next = next->next;
}
@@ -957,7 +967,8 @@ static void free_cmd_box(struct mlx5_core_dev *dev,
}
static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
- gfp_t flags, int size)
+ gfp_t flags, int size,
+ u8 token)
{
struct mlx5_cmd_mailbox *tmp, *head = NULL;
struct mlx5_cmd_prot_block *block;
@@ -986,6 +997,7 @@ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
tmp->next = head;
block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
block->block_num = cpu_to_be32(n - i - 1);
+ block->token = token;
head = tmp;
}
msg->next = head;
@@ -1213,41 +1225,30 @@ err_dbg:
return err;
}
-void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
+static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
{
struct mlx5_cmd *cmd = &dev->cmd;
int i;
for (i = 0; i < cmd->max_reg_cmds; i++)
down(&cmd->sem);
-
down(&cmd->pages_sem);
- flush_workqueue(cmd->wq);
-
- cmd->mode = CMD_MODE_EVENTS;
+ cmd->mode = mode;
up(&cmd->pages_sem);
for (i = 0; i < cmd->max_reg_cmds; i++)
up(&cmd->sem);
}
-void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
+void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
- struct mlx5_cmd *cmd = &dev->cmd;
- int i;
-
- for (i = 0; i < cmd->max_reg_cmds; i++)
- down(&cmd->sem);
-
- down(&cmd->pages_sem);
-
- flush_workqueue(cmd->wq);
- cmd->mode = CMD_MODE_POLLING;
+ mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
+}
- up(&cmd->pages_sem);
- for (i = 0; i < cmd->max_reg_cmds; i++)
- up(&cmd->sem);
+void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
+{
+ mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
}
static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
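The two mode-setting entry points now share mlx5_cmd_change_mod(), which quiesces the interface by acquiring every submission slot: once all max_reg_cmds units of cmd->sem plus the dedicated pages_sem are held, no command can be mid-flight, so flipping cmd->mode is race-free. The old flush_workqueue() call disappears, presumably because holding every slot already implies the queue is drained. A small POSIX-semaphore sketch of the drain, flip, release sequence (the slot count is invented):

#include <semaphore.h>
#include <stdio.h>

#define MAX_REG_CMDS 4

static sem_t cmd_sem;   /* counting: one unit per regular command slot */
static sem_t pages_sem; /* binary: the dedicated page-request slot */
static int cmd_mode;

/* Drain every slot so nothing is in flight, flip the mode, hand back. */
static void cmd_change_mode(int mode)
{
	int i;

	for (i = 0; i < MAX_REG_CMDS; i++)
		sem_wait(&cmd_sem);
	sem_wait(&pages_sem);
	cmd_mode = mode; /* no submitter can observe a torn update */
	sem_post(&pages_sem);
	for (i = 0; i < MAX_REG_CMDS; i++)
		sem_post(&cmd_sem);
}

int main(void)
{
	sem_init(&cmd_sem, 0, MAX_REG_CMDS);
	sem_init(&pages_sem, 0, 1);
	cmd_change_mode(1);
	printf("mode=%d\n", cmd_mode);
	return 0;
}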
@@ -1283,6 +1284,8 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
struct semaphore *sem;
ent = cmd->ent_arr[i];
+ if (ent->callback)
+ cancel_delayed_work(&ent->cb_timeout_work);
if (ent->page_queue)
sem = &cmd->pages_sem;
else
@@ -1369,7 +1372,7 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
}
if (IS_ERR(msg))
- msg = mlx5_alloc_cmd_msg(dev, gfp, in_size);
+ msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
return msg;
}
@@ -1394,6 +1397,7 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int err;
u8 status = 0;
u32 drv_synd;
+ u8 token;
if (pci_channel_offline(dev->pdev) ||
dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
@@ -1412,20 +1416,22 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
return err;
}
- err = mlx5_copy_to_msg(inb, in, in_size);
+ token = alloc_token(&dev->cmd);
+
+ err = mlx5_copy_to_msg(inb, in, in_size, token);
if (err) {
mlx5_core_warn(dev, "err %d\n", err);
goto out_in;
}
- outb = mlx5_alloc_cmd_msg(dev, gfp, out_size);
+ outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);
if (IS_ERR(outb)) {
err = PTR_ERR(outb);
goto out_in;
}
err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
- pages_queue, &status);
+ pages_queue, &status, token);
if (err)
goto out_out;
@@ -1493,7 +1499,7 @@ static int create_msg_cache(struct mlx5_core_dev *dev)
INIT_LIST_HEAD(&cmd->cache.med.head);
for (i = 0; i < NUM_LONG_LISTS; i++) {
- msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);
+ msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE, 0);
if (IS_ERR(msg)) {
err = PTR_ERR(msg);
goto ex_err;
@@ -1503,7 +1509,7 @@ static int create_msg_cache(struct mlx5_core_dev *dev)
}
for (i = 0; i < NUM_MED_LISTS; i++) {
- msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE);
+ msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE, 0);
if (IS_ERR(msg)) {
err = PTR_ERR(msg);
goto ex_err;
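Across these hunks the token is allocated once per command in cmd_exec(), stamped into every mailbox block of both the inbox and outbox (block->token above), and recorded in the work entry, so a completion can be matched to the command that issued it; cache-allocated messages pass token 0 and are restamped at copy time. A standalone sketch of nonzero-wrapping token allocation and stamping, modeled loosely on the driver's alloc_token() with locking omitted:

#include <stdint.h>
#include <stdio.h>

/* Every mailbox block of a command carries the same 8-bit token. */
struct block {
	uint8_t token;
	/* ... payload ... */
};

static uint8_t next_token;

/* Monotonic 8-bit token; skips 0 on wrap, as the driver appears to. */
static uint8_t alloc_token(void)
{
	next_token++;
	if (next_token == 0)
		next_token++;
	return next_token;
}

int main(void)
{
	struct block blk;
	uint8_t token = alloc_token();

	blk.token = token; /* stamped at copy/alloc time, as in the patch */
	printf("completion %s\n",
	       blk.token == token ? "matches" : "is stale");
	return 0;
}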
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index b51e42d6fbec..873a631ad155 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -39,6 +39,53 @@
#include <linux/mlx5/cq.h>
#include "mlx5_core.h"
+#define TASKLET_MAX_TIME 2
+#define TASKLET_MAX_TIME_JIFFIES msecs_to_jiffies(TASKLET_MAX_TIME)
+
+void mlx5_cq_tasklet_cb(unsigned long data)
+{
+ unsigned long flags;
+ unsigned long end = jiffies + TASKLET_MAX_TIME_JIFFIES;
+ struct mlx5_eq_tasklet *ctx = (struct mlx5_eq_tasklet *)data;
+ struct mlx5_core_cq *mcq;
+ struct mlx5_core_cq *temp;
+
+ spin_lock_irqsave(&ctx->lock, flags);
+ list_splice_tail_init(&ctx->list, &ctx->process_list);
+ spin_unlock_irqrestore(&ctx->lock, flags);
+
+ list_for_each_entry_safe(mcq, temp, &ctx->process_list,
+ tasklet_ctx.list) {
+ list_del_init(&mcq->tasklet_ctx.list);
+ mcq->tasklet_ctx.comp(mcq);
+ if (atomic_dec_and_test(&mcq->refcount))
+ complete(&mcq->free);
+ if (time_after(jiffies, end))
+ break;
+ }
+
+ if (!list_empty(&ctx->process_list))
+ tasklet_schedule(&ctx->task);
+}
+
+static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq)
+{
+ unsigned long flags;
+ struct mlx5_eq_tasklet *tasklet_ctx = cq->tasklet_ctx.priv;
+
+ spin_lock_irqsave(&tasklet_ctx->lock, flags);
+	/* When CQ migration between EQs is implemented, this point will
+	 * need synchronization: it is possible that, while a CQ is being
+	 * migrated, completions still arrive on the old EQ.
+	 */
+ if (list_empty_careful(&cq->tasklet_ctx.list)) {
+ atomic_inc(&cq->refcount);
+ list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
+ }
+ spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
+}
+
void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn)
{
struct mlx5_core_cq *cq;
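mlx5_cq_tasklet_cb() above splices all pending CQs onto a private list under the lock, then completes them against a TASKLET_MAX_TIME = 2 ms deadline, rescheduling itself if anything is left so a completion storm cannot monopolize softirq time; the atomic_dec_and_test()/complete() pair lets a concurrent destroyer wait until the tasklet has dropped its reference. A userspace sketch of the budgeted-drain idea (queue contents and the reschedule are simplified):

#include <stdio.h>
#include <time.h>

#define BUDGET_MS 2 /* mirrors TASKLET_MAX_TIME */

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Complete queued items until the budget expires; report leftovers. */
static int process(const int *queue, int n)
{
	long end = now_ms() + BUDGET_MS;
	int done = 0;

	while (done < n) {
		printf("completing cq %d\n", queue[done]);
		done++;
		if (now_ms() > end) /* budget spent: leave the rest queued */
			break;
	}
	return n - done;
}

int main(void)
{
	int q[] = { 1, 2, 3 };
	int left = process(q, 3);

	if (left) /* stands in for tasklet_schedule(&ctx->task) */
		printf("%d cqs left, rescheduling\n", left);
	return 0;
}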
@@ -96,6 +143,13 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
struct mlx5_create_cq_mbox_out out;
struct mlx5_destroy_cq_mbox_in din;
struct mlx5_destroy_cq_mbox_out dout;
+ int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context),
+ c_eqn);
+ struct mlx5_eq *eq;
+
+ eq = mlx5_eqn2eq(dev, eqn);
+ if (IS_ERR(eq))
+ return PTR_ERR(eq);
in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_CQ);
memset(&out, 0, sizeof(out));
@@ -111,6 +165,11 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
cq->arm_sn = 0;
atomic_set(&cq->refcount, 1);
init_completion(&cq->free);
+ if (!cq->comp)
+ cq->comp = mlx5_add_cq_to_tasklet;
+ /* assuming CQ will be deleted before the EQ */
+ cq->tasklet_ctx.priv = &eq->tasklet_ctx;
+ INIT_LIST_HEAD(&cq->tasklet_ctx.list);
spin_lock_irq(&table->lock);
err = radix_tree_insert(&table->tree, cq->cqn, cq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 24344aafbd36..bf722aa88cf0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -44,8 +44,12 @@
#include <linux/mlx5/vport.h>
#include <linux/mlx5/transobj.h>
#include <linux/rhashtable.h>
+#include <net/switchdev.h>
#include "wq.h"
#include "mlx5_core.h"
+#include "en_stats.h"
+
+#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
#define MLX5E_MAX_NUM_TC 8
@@ -57,263 +61,152 @@
#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x1
+#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW 0x4
+#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW 0x6
+
+#define MLX5_MPWRQ_LOG_STRIDE_SIZE 6 /* >= 6, HW restriction */
+#define MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS 8 /* >= 6, HW restriction */
+#define MLX5_MPWRQ_LOG_WQE_SZ 17
+#define MLX5_MPWRQ_WQE_PAGE_ORDER (MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT > 0 ? \
+ MLX5_MPWRQ_LOG_WQE_SZ - PAGE_SHIFT : 0)
+#define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
+#define MLX5_MPWRQ_STRIDES_PER_PAGE (MLX5_MPWRQ_NUM_STRIDES >> \
+ MLX5_MPWRQ_WQE_PAGE_ORDER)
+
+#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
+#define MLX5E_REQUIRED_MTTS(rqs, wqes)\
+ (rqs * wqes * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8))
+#define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) <= U16_MAX)
+
+#define MLX5_UMR_ALIGN (2048)
+#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128)
+
#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024)
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
+#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80
+#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW 0x2
#define MLX5E_LOG_INDIR_RQT_SIZE 0x7
#define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE)
#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1)
+#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET 128
#define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */
#define MLX5E_SQ_BF_BUDGET 16
#define MLX5E_NUM_MAIN_GROUPS 9
+static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
+{
+ switch (wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW,
+ wq_size / 2);
+ default:
+ return min_t(u16, MLX5E_PARAMS_DEFAULT_MIN_RX_WQES,
+ wq_size / 2);
+ }
+}
+
+static inline int mlx5_min_log_rq_size(int wq_type)
+{
+ switch (wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW;
+ default:
+ return MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
+ }
+}
+
+static inline int mlx5_max_log_rq_size(int wq_type)
+{
+ switch (wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW;
+ default:
+ return MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
+ }
+}
+
+enum {
+ MLX5E_INLINE_MODE_L2,
+ MLX5E_INLINE_MODE_VPORT_CONTEXT,
+ MLX5_INLINE_MODE_NOT_REQUIRED,
+};
+
+struct mlx5e_tx_wqe {
+ struct mlx5_wqe_ctrl_seg ctrl;
+ struct mlx5_wqe_eth_seg eth;
+};
+
+struct mlx5e_rx_wqe {
+ struct mlx5_wqe_srq_next_seg next;
+ struct mlx5_wqe_data_seg data;
+};
+
+struct mlx5e_umr_wqe {
+ struct mlx5_wqe_ctrl_seg ctrl;
+ struct mlx5_wqe_umr_ctrl_seg uctrl;
+ struct mlx5_mkey_seg mkc;
+ struct mlx5_wqe_data_seg data;
+};
+
+static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = {
+ "rx_cqe_moder",
+};
+
+enum mlx5e_priv_flag {
+ MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0),
+};
+
+#define MLX5E_SET_PRIV_FLAG(priv, pflag, enable) \
+ do { \
+ if (enable) \
+ priv->pflags |= pflag; \
+ else \
+ priv->pflags &= ~pflag; \
+ } while (0)
+
#ifdef CONFIG_MLX5_CORE_EN_DCB
#define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */
-#define MLX5E_MIN_BW_ALLOC 1 /* Min percentage of BW allocation */
#endif
-static const char vport_strings[][ETH_GSTRING_LEN] = {
- /* vport statistics */
- "rx_packets",
- "rx_bytes",
- "tx_packets",
- "tx_bytes",
- "rx_error_packets",
- "rx_error_bytes",
- "tx_error_packets",
- "tx_error_bytes",
- "rx_unicast_packets",
- "rx_unicast_bytes",
- "tx_unicast_packets",
- "tx_unicast_bytes",
- "rx_multicast_packets",
- "rx_multicast_bytes",
- "tx_multicast_packets",
- "tx_multicast_bytes",
- "rx_broadcast_packets",
- "rx_broadcast_bytes",
- "tx_broadcast_packets",
- "tx_broadcast_bytes",
-
- /* SW counters */
- "tso_packets",
- "tso_bytes",
- "tso_inner_packets",
- "tso_inner_bytes",
- "lro_packets",
- "lro_bytes",
- "rx_csum_good",
- "rx_csum_none",
- "rx_csum_sw",
- "tx_csum_offload",
- "tx_csum_inner",
- "tx_queue_stopped",
- "tx_queue_wake",
- "tx_queue_dropped",
- "rx_wqe_err",
-};
-
-struct mlx5e_vport_stats {
- /* HW counters */
- u64 rx_packets;
- u64 rx_bytes;
- u64 tx_packets;
- u64 tx_bytes;
- u64 rx_error_packets;
- u64 rx_error_bytes;
- u64 tx_error_packets;
- u64 tx_error_bytes;
- u64 rx_unicast_packets;
- u64 rx_unicast_bytes;
- u64 tx_unicast_packets;
- u64 tx_unicast_bytes;
- u64 rx_multicast_packets;
- u64 rx_multicast_bytes;
- u64 tx_multicast_packets;
- u64 tx_multicast_bytes;
- u64 rx_broadcast_packets;
- u64 rx_broadcast_bytes;
- u64 tx_broadcast_packets;
- u64 tx_broadcast_bytes;
-
- /* SW counters */
- u64 tso_packets;
- u64 tso_bytes;
- u64 tso_inner_packets;
- u64 tso_inner_bytes;
- u64 lro_packets;
- u64 lro_bytes;
- u64 rx_csum_good;
- u64 rx_csum_none;
- u64 rx_csum_sw;
- u64 tx_csum_offload;
- u64 tx_csum_inner;
- u64 tx_queue_stopped;
- u64 tx_queue_wake;
- u64 tx_queue_dropped;
- u64 rx_wqe_err;
-
-#define NUM_VPORT_COUNTERS 35
-};
-
-static const char pport_strings[][ETH_GSTRING_LEN] = {
- /* IEEE802.3 counters */
- "frames_tx",
- "frames_rx",
- "check_seq_err",
- "alignment_err",
- "octets_tx",
- "octets_received",
- "multicast_xmitted",
- "broadcast_xmitted",
- "multicast_rx",
- "broadcast_rx",
- "in_range_len_errors",
- "out_of_range_len",
- "too_long_errors",
- "symbol_err",
- "mac_control_tx",
- "mac_control_rx",
- "unsupported_op_rx",
- "pause_ctrl_rx",
- "pause_ctrl_tx",
-
- /* RFC2863 counters */
- "in_octets",
- "in_ucast_pkts",
- "in_discards",
- "in_errors",
- "in_unknown_protos",
- "out_octets",
- "out_ucast_pkts",
- "out_discards",
- "out_errors",
- "in_multicast_pkts",
- "in_broadcast_pkts",
- "out_multicast_pkts",
- "out_broadcast_pkts",
-
- /* RFC2819 counters */
- "drop_events",
- "octets",
- "pkts",
- "broadcast_pkts",
- "multicast_pkts",
- "crc_align_errors",
- "undersize_pkts",
- "oversize_pkts",
- "fragments",
- "jabbers",
- "collisions",
- "p64octets",
- "p65to127octets",
- "p128to255octets",
- "p256to511octets",
- "p512to1023octets",
- "p1024to1518octets",
- "p1519to2047octets",
- "p2048to4095octets",
- "p4096to8191octets",
- "p8192to10239octets",
-};
-
-#define NUM_IEEE_802_3_COUNTERS 19
-#define NUM_RFC_2863_COUNTERS 13
-#define NUM_RFC_2819_COUNTERS 21
-#define NUM_PPORT_COUNTERS (NUM_IEEE_802_3_COUNTERS + \
- NUM_RFC_2863_COUNTERS + \
- NUM_RFC_2819_COUNTERS)
-
-struct mlx5e_pport_stats {
- __be64 IEEE_802_3_counters[NUM_IEEE_802_3_COUNTERS];
- __be64 RFC_2863_counters[NUM_RFC_2863_COUNTERS];
- __be64 RFC_2819_counters[NUM_RFC_2819_COUNTERS];
-};
-
-static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
- "packets",
- "bytes",
- "csum_none",
- "csum_sw",
- "lro_packets",
- "lro_bytes",
- "wqe_err"
-};
-
-struct mlx5e_rq_stats {
- u64 packets;
- u64 bytes;
- u64 csum_none;
- u64 csum_sw;
- u64 lro_packets;
- u64 lro_bytes;
- u64 wqe_err;
-#define NUM_RQ_STATS 7
-};
-
-static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
- "packets",
- "bytes",
- "tso_packets",
- "tso_bytes",
- "tso_inner_packets",
- "tso_inner_bytes",
- "csum_offload_inner",
- "nop",
- "csum_offload_none",
- "stopped",
- "wake",
- "dropped",
-};
-
-struct mlx5e_sq_stats {
- /* commonly accessed in data path */
- u64 packets;
- u64 bytes;
- u64 tso_packets;
- u64 tso_bytes;
- u64 tso_inner_packets;
- u64 tso_inner_bytes;
- u64 csum_offload_inner;
- u64 nop;
- /* less likely accessed in data path */
- u64 csum_offload_none;
- u64 stopped;
- u64 wake;
- u64 dropped;
-#define NUM_SQ_STATS 12
-};
-
-struct mlx5e_stats {
- struct mlx5e_vport_stats vport;
- struct mlx5e_pport_stats pport;
+struct mlx5e_cq_moder {
+ u16 usec;
+ u16 pkts;
};
struct mlx5e_params {
u8 log_sq_size;
+ u8 rq_wq_type;
+ u8 mpwqe_log_stride_sz;
+ u8 mpwqe_log_num_strides;
u8 log_rq_size;
u16 num_channels;
u8 num_tc;
- u16 rx_cq_moderation_usec;
- u16 rx_cq_moderation_pkts;
- u16 tx_cq_moderation_usec;
- u16 tx_cq_moderation_pkts;
+ u8 rx_cq_period_mode;
+ bool rx_cqe_compress_admin;
+ bool rx_cqe_compress;
+ struct mlx5e_cq_moder rx_cq_moderation;
+ struct mlx5e_cq_moder tx_cq_moderation;
u16 min_rx_wqes;
bool lro_en;
u32 lro_wqe_sz;
u16 tx_max_inline;
+ u8 tx_min_inline_mode;
u8 rss_hfunc;
u8 toeplitz_hash_key[40];
u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE];
+ bool vlan_strip_disable;
#ifdef CONFIG_MLX5_CORE_EN_DCB
struct ieee_ets ets;
#endif
+ bool rx_am_enabled;
};
struct mlx5e_tstamp {
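The MPWRQ macros above are plain arithmetic: a multi-packet WQE spans 2^MLX5_MPWRQ_LOG_WQE_SZ = 128 KiB, so with 4 KiB pages (PAGE_SHIFT = 12) its page order is 17 - 12 = 5, i.e. MLX5_MPWRQ_PAGES_PER_WQE = 32; the max(..., 0) guard only matters on configurations whose page size reaches 128 KiB. A one-file check of that computation, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12 /* assumes 4 KiB pages */
#define LOG_WQE_SZ 17 /* MLX5_MPWRQ_LOG_WQE_SZ */

int main(void)
{
	int order = LOG_WQE_SZ - PAGE_SHIFT > 0 ? LOG_WQE_SZ - PAGE_SHIFT : 0;

	/* 2^17 B WQE / 2^12 B pages -> order 5, i.e. 32 pages per WQE */
	printf("page order %d, pages per WQE %d\n", order, 1 << order);
	return 0;
}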
@@ -330,7 +223,9 @@ struct mlx5e_tstamp {
};
enum {
- MLX5E_RQ_STATE_POST_WQES_ENABLE,
+ MLX5E_RQ_STATE_FLUSH,
+ MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
+ MLX5E_RQ_STATE_AM,
};
struct mlx5e_cq {
@@ -338,37 +233,124 @@ struct mlx5e_cq {
struct mlx5_cqwq wq;
/* data path - accessed per napi poll */
+ u16 event_ctr;
struct napi_struct *napi;
struct mlx5_core_cq mcq;
struct mlx5e_channel *channel;
struct mlx5e_priv *priv;
+ /* cqe decompression */
+ struct mlx5_cqe64 title;
+ struct mlx5_mini_cqe8 mini_arr[MLX5_MINI_CQE_ARRAY_SIZE];
+ u8 mini_arr_idx;
+ u16 decmprs_left;
+ u16 decmprs_wqe_counter;
+
/* control */
struct mlx5_wq_ctrl wq_ctrl;
} ____cacheline_aligned_in_smp;
+struct mlx5e_rq;
+typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq *rq,
+ struct mlx5_cqe64 *cqe);
+typedef int (*mlx5e_fp_alloc_wqe)(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe,
+ u16 ix);
+
+typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq *rq, u16 ix);
+
+struct mlx5e_dma_info {
+ struct page *page;
+ dma_addr_t addr;
+};
+
+struct mlx5e_rx_am_stats {
+ int ppms; /* packets per msec */
+ int epms; /* events per msec */
+};
+
+struct mlx5e_rx_am_sample {
+ ktime_t time;
+ unsigned int pkt_ctr;
+ u16 event_ctr;
+};
+
+struct mlx5e_rx_am { /* Adaptive Moderation */
+ u8 state;
+ struct mlx5e_rx_am_stats prev_stats;
+ struct mlx5e_rx_am_sample start_sample;
+ struct work_struct work;
+ u8 profile_ix;
+ u8 mode;
+ u8 tune_state;
+ u8 steps_right;
+ u8 steps_left;
+ u8 tired;
+};
+
struct mlx5e_rq {
/* data path */
struct mlx5_wq_ll wq;
u32 wqe_sz;
struct sk_buff **skb;
+ struct mlx5e_mpw_info *wqe_info;
+ __be32 mkey_be;
+ __be32 umr_mkey_be;
struct device *pdev;
struct net_device *netdev;
struct mlx5e_tstamp *tstamp;
struct mlx5e_rq_stats stats;
struct mlx5e_cq cq;
+ mlx5e_fp_handle_rx_cqe handle_rx_cqe;
+ mlx5e_fp_alloc_wqe alloc_wqe;
+ mlx5e_fp_dealloc_wqe dealloc_wqe;
unsigned long state;
int ix;
+ u32 mpwqe_mtt_offset;
+
+ struct mlx5e_rx_am am; /* Adaptive Moderation */
/* control */
struct mlx5_wq_ctrl wq_ctrl;
+ u8 wq_type;
+ u32 mpwqe_stride_sz;
+ u32 mpwqe_num_strides;
u32 rqn;
struct mlx5e_channel *channel;
struct mlx5e_priv *priv;
} ____cacheline_aligned_in_smp;
+struct mlx5e_umr_dma_info {
+ __be64 *mtt;
+ __be64 *mtt_no_align;
+ dma_addr_t mtt_addr;
+ struct mlx5e_dma_info *dma_info;
+};
+
+struct mlx5e_mpw_info {
+ union {
+ struct mlx5e_dma_info dma_info;
+ struct mlx5e_umr_dma_info umr;
+ };
+ u16 consumed_strides;
+ u16 skbs_frags[MLX5_MPWRQ_PAGES_PER_WQE];
+
+ void (*dma_pre_sync)(struct device *pdev,
+ struct mlx5e_mpw_info *wi,
+ u32 wqe_offset, u32 len);
+ void (*add_skb_frag)(struct mlx5e_rq *rq,
+ struct sk_buff *skb,
+ struct mlx5e_mpw_info *wi,
+ u32 page_idx, u32 frag_offset, u32 len);
+ void (*copy_skb_header)(struct device *pdev,
+ struct sk_buff *skb,
+ struct mlx5e_mpw_info *wi,
+ u32 page_idx, u32 offset,
+ u32 headlen);
+ void (*free_wqe)(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);
+};
+
struct mlx5e_tx_wqe_info {
u32 num_bytes;
u8 num_wqebbs;
@@ -387,10 +369,15 @@ struct mlx5e_sq_dma {
};
enum {
- MLX5E_SQ_STATE_WAKE_TXQ_ENABLE,
+ MLX5E_SQ_STATE_FLUSH,
MLX5E_SQ_STATE_BF_ENABLE,
};
+struct mlx5e_ico_wqe_info {
+ u8 opcode;
+ u8 num_wqebbs;
+};
+
struct mlx5e_sq {
/* data path */
@@ -421,6 +408,7 @@ struct mlx5e_sq {
u32 sqn;
u16 bf_buf_size;
u16 max_inline;
+ u8 min_inline_mode;
u16 edge;
struct device *pdev;
struct mlx5e_tstamp *tstamp;
@@ -432,6 +420,8 @@ struct mlx5e_sq {
struct mlx5_uar uar;
struct mlx5e_channel *channel;
int tc;
+ struct mlx5e_ico_wqe_info *ico_wqe_info;
+ u32 rate_limit;
} ____cacheline_aligned_in_smp;
static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n)
@@ -448,6 +438,7 @@ struct mlx5e_channel {
/* data path */
struct mlx5e_rq rq;
struct mlx5e_sq sq[MLX5E_MAX_NUM_TC];
+ struct mlx5e_sq icosq; /* internal control operations */
struct napi_struct napi;
struct device *pdev;
struct net_device *netdev;
@@ -474,42 +465,42 @@ enum mlx5e_traffic_types {
MLX5E_TT_IPV6,
MLX5E_TT_ANY,
MLX5E_NUM_TT,
+ MLX5E_NUM_INDIR_TIRS = MLX5E_TT_ANY,
};
-#define IS_HASHING_TT(tt) (tt != MLX5E_TT_ANY)
+enum {
+ MLX5E_STATE_ASYNC_EVENTS_ENABLED,
+ MLX5E_STATE_OPENED,
+ MLX5E_STATE_DESTROYING,
+};
-enum mlx5e_rqt_ix {
- MLX5E_INDIRECTION_RQT,
- MLX5E_SINGLE_RQ_RQT,
- MLX5E_NUM_RQT,
+struct mlx5e_vxlan_db {
+ spinlock_t lock; /* protect vxlan table */
+ struct radix_tree_root tree;
};
-struct mlx5e_eth_addr_info {
+struct mlx5e_l2_rule {
u8 addr[ETH_ALEN + 2];
- u32 tt_vec;
- struct mlx5_flow_rule *ft_rule[MLX5E_NUM_TT];
+ struct mlx5_flow_rule *rule;
};
-#define MLX5E_ETH_ADDR_HASH_SIZE (1 << BITS_PER_BYTE)
-
-struct mlx5e_eth_addr_db {
- struct hlist_head netdev_uc[MLX5E_ETH_ADDR_HASH_SIZE];
- struct hlist_head netdev_mc[MLX5E_ETH_ADDR_HASH_SIZE];
- struct mlx5e_eth_addr_info broadcast;
- struct mlx5e_eth_addr_info allmulti;
- struct mlx5e_eth_addr_info promisc;
- bool broadcast_enabled;
- bool allmulti_enabled;
- bool promisc_enabled;
+struct mlx5e_flow_table {
+ int num_groups;
+ struct mlx5_flow_table *t;
+ struct mlx5_flow_group **g;
};
-enum {
- MLX5E_STATE_ASYNC_EVENTS_ENABLE,
- MLX5E_STATE_OPENED,
- MLX5E_STATE_DESTROYING,
+#define MLX5E_L2_ADDR_HASH_SIZE BIT(BITS_PER_BYTE)
+
+struct mlx5e_tc_table {
+ struct mlx5_flow_table *t;
+
+ struct rhashtable_params ht_params;
+ struct rhashtable ht;
};
-struct mlx5e_vlan_db {
+struct mlx5e_vlan_table {
+ struct mlx5e_flow_table ft;
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct mlx5_flow_rule *active_vlans_rule[VLAN_N_VID];
struct mlx5_flow_rule *untagged_rule;
@@ -517,29 +508,112 @@ struct mlx5e_vlan_db {
bool filter_disabled;
};
-struct mlx5e_vxlan_db {
- spinlock_t lock; /* protect vxlan table */
- struct radix_tree_root tree;
+struct mlx5e_l2_table {
+ struct mlx5e_flow_table ft;
+ struct hlist_head netdev_uc[MLX5E_L2_ADDR_HASH_SIZE];
+ struct hlist_head netdev_mc[MLX5E_L2_ADDR_HASH_SIZE];
+ struct mlx5e_l2_rule broadcast;
+ struct mlx5e_l2_rule allmulti;
+ struct mlx5e_l2_rule promisc;
+ bool broadcast_enabled;
+ bool allmulti_enabled;
+ bool promisc_enabled;
};
-struct mlx5e_flow_table {
- int num_groups;
- struct mlx5_flow_table *t;
- struct mlx5_flow_group **g;
+/* L3/L4 traffic type classifier */
+struct mlx5e_ttc_table {
+ struct mlx5e_flow_table ft;
+ struct mlx5_flow_rule *rules[MLX5E_NUM_TT];
};
-struct mlx5e_tc_flow_table {
- struct mlx5_flow_table *t;
+#define ARFS_HASH_SHIFT BITS_PER_BYTE
+#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)
+struct arfs_table {
+ struct mlx5e_flow_table ft;
+ struct mlx5_flow_rule *default_rule;
+ struct hlist_head rules_hash[ARFS_HASH_SIZE];
+};
- struct rhashtable_params ht_params;
- struct rhashtable ht;
+enum arfs_type {
+ ARFS_IPV4_TCP,
+ ARFS_IPV6_TCP,
+ ARFS_IPV4_UDP,
+ ARFS_IPV6_UDP,
+ ARFS_NUM_TYPES,
+};
+
+struct mlx5e_arfs_tables {
+ struct arfs_table arfs_tables[ARFS_NUM_TYPES];
+ /* Protect aRFS rules list */
+ spinlock_t arfs_lock;
+ struct list_head rules;
+ int last_filter_id;
+ struct workqueue_struct *wq;
+};
+
+/* NIC prio FTS */
+enum {
+ MLX5E_VLAN_FT_LEVEL = 0,
+ MLX5E_L2_FT_LEVEL,
+ MLX5E_TTC_FT_LEVEL,
+ MLX5E_ARFS_FT_LEVEL
+};
+
+struct mlx5e_ethtool_table {
+ struct mlx5_flow_table *ft;
+ int num_rules;
+};
+
+#define ETHTOOL_NUM_L3_L4_FTS 7
+#define ETHTOOL_NUM_L2_FTS 4
+
+struct mlx5e_ethtool_steering {
+ struct mlx5e_ethtool_table l3_l4_ft[ETHTOOL_NUM_L3_L4_FTS];
+ struct mlx5e_ethtool_table l2_ft[ETHTOOL_NUM_L2_FTS];
+ struct list_head rules;
+ int tot_num_rules;
+};
+
+struct mlx5e_flow_steering {
+ struct mlx5_flow_namespace *ns;
+ struct mlx5e_ethtool_steering ethtool;
+ struct mlx5e_tc_table tc;
+ struct mlx5e_vlan_table vlan;
+ struct mlx5e_l2_table l2;
+ struct mlx5e_ttc_table ttc;
+ struct mlx5e_arfs_tables arfs;
+};
+
+struct mlx5e_rqt {
+ u32 rqtn;
+ bool enabled;
};
-struct mlx5e_flow_tables {
- struct mlx5_flow_namespace *ns;
- struct mlx5e_tc_flow_table tc;
- struct mlx5e_flow_table vlan;
- struct mlx5e_flow_table main;
+struct mlx5e_tir {
+ u32 tirn;
+ struct mlx5e_rqt rqt;
+ struct list_head list;
+};
+
+enum {
+ MLX5E_TC_PRIO = 0,
+ MLX5E_NIC_PRIO
+};
+
+struct mlx5e_profile {
+ void (*init)(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile, void *ppriv);
+ void (*cleanup)(struct mlx5e_priv *priv);
+ int (*init_rx)(struct mlx5e_priv *priv);
+ void (*cleanup_rx)(struct mlx5e_priv *priv);
+ int (*init_tx)(struct mlx5e_priv *priv);
+ void (*cleanup_tx)(struct mlx5e_priv *priv);
+ void (*enable)(struct mlx5e_priv *priv);
+ void (*disable)(struct mlx5e_priv *priv);
+ void (*update_stats)(struct mlx5e_priv *priv);
+ int (*max_nch)(struct mlx5_core_dev *mdev);
+ int max_tc;
};
struct mlx5e_priv {
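struct mlx5e_profile above turns the netdev lifecycle into an ops table: the core drives init, rx/tx setup, enable/disable, and stats through these hooks, which is what lets the same code serve both the plain NIC and the eswitch representor netdevs declared further down. A minimal sketch of the dispatch style (hook set abbreviated, names invented):

#include <stdio.h>

struct profile {
	int (*init_rx)(void *priv);
	void (*cleanup_rx)(void *priv);
	int max_tc;
};

static int nic_init_rx(void *priv)
{
	(void)priv;
	printf("nic rx init\n");
	return 0;
}

static void nic_cleanup_rx(void *priv)
{
	(void)priv;
	printf("nic rx cleanup\n");
}

static const struct profile nic_profile = {
	.init_rx	= nic_init_rx,
	.cleanup_rx	= nic_cleanup_rx,
	.max_tc		= 8,
};

/* The core calls through the hooks and never knows which variant runs. */
static int attach(const struct profile *p, void *priv)
{
	return p->init_rx(priv);
}

int main(void)
{
	attach(&nic_profile, NULL);
	nic_profile.cleanup_rx(NULL);
	return 0;
}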
@@ -550,46 +624,34 @@ struct mlx5e_priv {
unsigned long state;
struct mutex state_lock; /* Protects Interface state */
- struct mlx5_uar cq_uar;
- u32 pdn;
- u32 tdn;
- struct mlx5_core_mkey mkey;
+ struct mlx5_core_mkey umr_mkey;
struct mlx5e_rq drop_rq;
struct mlx5e_channel **channel;
u32 tisn[MLX5E_MAX_NUM_TC];
- u32 rqtn[MLX5E_NUM_RQT];
- u32 tirn[MLX5E_NUM_TT];
+ struct mlx5e_rqt indir_rqt;
+ struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
+ struct mlx5e_tir direct_tir[MLX5E_MAX_NUM_CHANNELS];
+ u32 tx_rates[MLX5E_MAX_NUM_SQS];
- struct mlx5e_flow_tables fts;
- struct mlx5e_eth_addr_db eth_addr;
- struct mlx5e_vlan_db vlan;
-#ifdef CONFIG_MLX5_CORE_EN_VXLAN
+ struct mlx5e_flow_steering fs;
struct mlx5e_vxlan_db vxlan;
-#endif
struct mlx5e_params params;
struct workqueue_struct *wq;
struct work_struct update_carrier_work;
struct work_struct set_rx_mode_work;
+ struct work_struct tx_timeout_work;
struct delayed_work update_stats_work;
+ u32 pflags;
struct mlx5_core_dev *mdev;
struct net_device *netdev;
struct mlx5e_stats stats;
struct mlx5e_tstamp tstamp;
-};
-
-#define MLX5E_NET_IP_ALIGN 2
-
-struct mlx5e_tx_wqe {
- struct mlx5_wqe_ctrl_seg ctrl;
- struct mlx5_wqe_eth_seg eth;
-};
-
-struct mlx5e_rx_wqe {
- struct mlx5_wqe_srq_next_seg next;
- struct mlx5_wqe_data_seg data;
+ u16 q_counter;
+ const struct mlx5e_profile *profile;
+ void *ppriv;
};
enum mlx5e_link_mode {
@@ -607,6 +669,7 @@ enum mlx5e_link_mode {
MLX5E_10GBASE_ER = 14,
MLX5E_40GBASE_SR4 = 15,
MLX5E_40GBASE_LR4 = 16,
+ MLX5E_50GBASE_SR2 = 18,
MLX5E_100GBASE_CR4 = 20,
MLX5E_100GBASE_SR4 = 21,
MLX5E_100GBASE_KR4 = 22,
@@ -624,6 +687,9 @@ enum mlx5e_link_mode {
#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
+
+void mlx5e_build_ptys2ethtool_map(void);
+
void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw);
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback);
@@ -634,14 +700,52 @@ void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
+void mlx5e_free_tx_descs(struct mlx5e_sq *sq);
+
+void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
+void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
+int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
+int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix);
+void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
+void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
+void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq);
+void mlx5e_complete_rx_linear_mpwqe(struct mlx5e_rq *rq,
+ struct mlx5_cqe64 *cqe,
+ u16 byte_cnt,
+ struct mlx5e_mpw_info *wi,
+ struct sk_buff *skb);
+void mlx5e_complete_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
+ struct mlx5_cqe64 *cqe,
+ u16 byte_cnt,
+ struct mlx5e_mpw_info *wi,
+ struct sk_buff *skb);
+void mlx5e_free_rx_linear_mpwqe(struct mlx5e_rq *rq,
+ struct mlx5e_mpw_info *wi);
+void mlx5e_free_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
+ struct mlx5e_mpw_info *wi);
struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
+void mlx5e_rx_am(struct mlx5e_rq *rq);
+void mlx5e_rx_am_work(struct work_struct *work);
+struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode);
+
void mlx5e_update_stats(struct mlx5e_priv *priv);
-int mlx5e_create_flow_tables(struct mlx5e_priv *priv);
-void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv);
-void mlx5e_init_eth_addr(struct mlx5e_priv *priv);
+int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
+void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
+void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
+void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
+int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
+ int location);
+int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv,
+ struct ethtool_rxnfc *info, u32 *rule_locs);
+int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
+ struct ethtool_rx_flow_spec *fs);
+int mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv,
+ int location);
+void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv);
+void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv);
void mlx5e_set_rx_mode_work(struct work_struct *work);
void mlx5e_fill_hwstamp(struct mlx5e_tstamp *clock, u64 timestamp,
@@ -650,6 +754,7 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv);
void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv);
int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr);
int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr);
+void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val);
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid);
@@ -658,16 +763,23 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
-int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix);
+int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd);
+
+int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix);
void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv);
int mlx5e_open_locked(struct net_device *netdev);
int mlx5e_close_locked(struct net_device *netdev);
-void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
+void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
+ u32 *indirection_rqt, int len,
int num_channels);
+int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
+
+void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
+ u8 cq_period_mode);
static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
- struct mlx5e_tx_wqe *wqe, int bf_sz)
+ struct mlx5_wqe_ctrl_seg *ctrl, int bf_sz)
{
u16 ofst = MLX5_BF_OFFSET + sq->bf_offset;
@@ -681,9 +793,9 @@ static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
*/
wmb();
if (bf_sz)
- __iowrite64_copy(sq->uar_map + ofst, &wqe->ctrl, bf_sz);
+ __iowrite64_copy(sq->uar_map + ofst, ctrl, bf_sz);
else
- mlx5_write64((__be32 *)&wqe->ctrl, sq->uar_map + ofst, NULL);
+ mlx5_write64((__be32 *)ctrl, sq->uar_map + ofst, NULL);
/* flush the write-combining mapped buffer */
wmb();
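mlx5e_tx_notify_hw() now takes the control segment directly instead of a whole TX WQE, so other WQE layouts can ring the doorbell too, but the ordering contract is unchanged: the descriptor must be globally visible before the doorbell or BlueFlame copy, hence the wmb() ahead of the MMIO write. A userspace model of that ordering, with a C11 release store standing in for wmb() plus the UAR write; real device MMIO still needs the barrier and writel-style accessors:

#include <stdatomic.h>
#include <stdio.h>

struct wq {
	int desc;             /* stands in for the WQE/ctrl segment */
	_Atomic int doorbell; /* stands in for the UAR register */
};

static void notify_hw(struct wq *wq, int idx)
{
	wq->desc = idx; /* build the descriptor first */
	/* release ordering: the desc write cannot sink below the bell */
	atomic_store_explicit(&wq->doorbell, idx, memory_order_release);
}

int main(void)
{
	struct wq wq = { 0 };

	notify_hw(&wq, 1);
	printf("doorbell=%d\n", atomic_load(&wq.doorbell));
	return 0;
}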
@@ -710,6 +822,66 @@ extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets);
#endif
+#ifndef CONFIG_RFS_ACCEL
+static inline int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
+{
+ return 0;
+}
+
+static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}
+
+static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv)
+{
+ return -ENOTSUPP;
+}
+
+static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv)
+{
+ return -ENOTSUPP;
+}
+#else
+int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
+void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv);
+int mlx5e_arfs_enable(struct mlx5e_priv *priv);
+int mlx5e_arfs_disable(struct mlx5e_priv *priv);
+int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id);
+#endif
+
u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev);
+int mlx5e_create_tir(struct mlx5_core_dev *mdev,
+ struct mlx5e_tir *tir, u32 *in, int inlen);
+void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
+ struct mlx5e_tir *tir);
+int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
+void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
+int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev);
+
+struct mlx5_eswitch_rep;
+int mlx5e_vport_rep_load(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep);
+void mlx5e_vport_rep_unload(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep);
+int mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep);
+void mlx5e_nic_rep_unload(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep);
+int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv);
+void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv);
+int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr);
+
+int mlx5e_create_direct_rqts(struct mlx5e_priv *priv);
+void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
+int mlx5e_create_direct_tirs(struct mlx5e_priv *priv);
+void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv);
+int mlx5e_create_tises(struct mlx5e_priv *priv);
+void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv);
+int mlx5e_close(struct net_device *netdev);
+int mlx5e_open(struct net_device *netdev);
+void mlx5e_update_stats_work(struct work_struct *work);
+void *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
+ const struct mlx5e_profile *profile, void *ppriv);
+void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv);
+struct rtnl_link_stats64 *
+mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
#endif /* __MLX5_EN_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
new file mode 100644
index 000000000000..a8cb38789774
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -0,0 +1,742 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifdef CONFIG_RFS_ACCEL
+
+#include <linux/hash.h>
+#include <linux/mlx5/fs.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include "en.h"
+
+struct arfs_tuple {
+ __be16 etype;
+ u8 ip_proto;
+ union {
+ __be32 src_ipv4;
+ struct in6_addr src_ipv6;
+ };
+ union {
+ __be32 dst_ipv4;
+ struct in6_addr dst_ipv6;
+ };
+ __be16 src_port;
+ __be16 dst_port;
+};
+
+struct arfs_rule {
+ struct mlx5e_priv *priv;
+ struct work_struct arfs_work;
+ struct mlx5_flow_rule *rule;
+ struct hlist_node hlist;
+ int rxq;
+ /* Flow ID passed to ndo_rx_flow_steer */
+ int flow_id;
+ /* Filter ID returned by ndo_rx_flow_steer */
+ int filter_id;
+ struct arfs_tuple tuple;
+};
+
+#define mlx5e_for_each_arfs_rule(hn, tmp, arfs_tables, i, j) \
+ for (i = 0; i < ARFS_NUM_TYPES; i++) \
+ mlx5e_for_each_hash_arfs_rule(hn, tmp, arfs_tables[i].rules_hash, j)
+
+#define mlx5e_for_each_hash_arfs_rule(hn, tmp, hash, j) \
+ for (j = 0; j < ARFS_HASH_SIZE; j++) \
+ hlist_for_each_entry_safe(hn, tmp, &hash[j], hlist)
+
+static enum mlx5e_traffic_types arfs_get_tt(enum arfs_type type)
+{
+ switch (type) {
+ case ARFS_IPV4_TCP:
+ return MLX5E_TT_IPV4_TCP;
+ case ARFS_IPV4_UDP:
+ return MLX5E_TT_IPV4_UDP;
+ case ARFS_IPV6_TCP:
+ return MLX5E_TT_IPV6_TCP;
+ case ARFS_IPV6_UDP:
+ return MLX5E_TT_IPV6_UDP;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int arfs_disable(struct mlx5e_priv *priv)
+{
+ struct mlx5_flow_destination dest;
+ struct mlx5e_tir *tir = priv->indir_tir;
+ int err = 0;
+ int tt;
+ int i;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ for (i = 0; i < ARFS_NUM_TYPES; i++) {
+ dest.tir_num = tir[i].tirn;
+ tt = arfs_get_tt(i);
+		/* Modify ttc rules destination to bypass the aRFS tables */
+ err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt],
+ &dest);
+ if (err) {
+ netdev_err(priv->netdev,
+ "%s: modify ttc destination failed\n",
+ __func__);
+ return err;
+ }
+ }
+ return 0;
+}
+
+static void arfs_del_rules(struct mlx5e_priv *priv);
+
+int mlx5e_arfs_disable(struct mlx5e_priv *priv)
+{
+ arfs_del_rules(priv);
+
+ return arfs_disable(priv);
+}
+
+int mlx5e_arfs_enable(struct mlx5e_priv *priv)
+{
+ struct mlx5_flow_destination dest;
+ int err = 0;
+ int tt;
+ int i;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ for (i = 0; i < ARFS_NUM_TYPES; i++) {
+ dest.ft = priv->fs.arfs.arfs_tables[i].ft.t;
+ tt = arfs_get_tt(i);
+		/* Modify ttc rules destination to point to the aRFS FTs */
+ err = mlx5_modify_rule_destination(priv->fs.ttc.rules[tt],
+ &dest);
+ if (err) {
+ netdev_err(priv->netdev,
+ "%s: modify ttc destination failed err=%d\n",
+ __func__, err);
+ arfs_disable(priv);
+ return err;
+ }
+ }
+ return 0;
+}
+
+static void arfs_destroy_table(struct arfs_table *arfs_t)
+{
+ mlx5_del_flow_rule(arfs_t->default_rule);
+ mlx5e_destroy_flow_table(&arfs_t->ft);
+}
+
+void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv)
+{
+ int i;
+
+ if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
+ return;
+
+ arfs_del_rules(priv);
+ destroy_workqueue(priv->fs.arfs.wq);
+ for (i = 0; i < ARFS_NUM_TYPES; i++) {
+ if (!IS_ERR_OR_NULL(priv->fs.arfs.arfs_tables[i].ft.t))
+ arfs_destroy_table(&priv->fs.arfs.arfs_tables[i]);
+ }
+}
+
+static int arfs_add_default_rule(struct mlx5e_priv *priv,
+ enum arfs_type type)
+{
+ struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type];
+ struct mlx5_flow_destination dest;
+ struct mlx5e_tir *tir = priv->indir_tir;
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec) {
+ netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+ err = -ENOMEM;
+ goto out;
+ }
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ switch (type) {
+ case ARFS_IPV4_TCP:
+ dest.tir_num = tir[MLX5E_TT_IPV4_TCP].tirn;
+ break;
+ case ARFS_IPV4_UDP:
+ dest.tir_num = tir[MLX5E_TT_IPV4_UDP].tirn;
+ break;
+ case ARFS_IPV6_TCP:
+ dest.tir_num = tir[MLX5E_TT_IPV6_TCP].tirn;
+ break;
+ case ARFS_IPV6_UDP:
+ dest.tir_num = tir[MLX5E_TT_IPV6_UDP].tirn;
+ break;
+ default:
+ err = -EINVAL;
+ goto out;
+ }
+
+ arfs_t->default_rule = mlx5_add_flow_rule(arfs_t->ft.t, spec,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ MLX5_FS_DEFAULT_FLOW_TAG,
+ &dest);
+ if (IS_ERR(arfs_t->default_rule)) {
+ err = PTR_ERR(arfs_t->default_rule);
+ arfs_t->default_rule = NULL;
+ netdev_err(priv->netdev, "%s: add rule failed, arfs type=%d\n",
+ __func__, type);
+ }
+out:
+ kvfree(spec);
+ return err;
+}
+
+#define MLX5E_ARFS_NUM_GROUPS 2
+#define MLX5E_ARFS_GROUP1_SIZE BIT(12)
+#define MLX5E_ARFS_GROUP2_SIZE BIT(0)
+#define MLX5E_ARFS_TABLE_SIZE (MLX5E_ARFS_GROUP1_SIZE +\
+ MLX5E_ARFS_GROUP2_SIZE)
+static int arfs_create_groups(struct mlx5e_flow_table *ft,
+ enum arfs_type type)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ void *outer_headers_c;
+ int ix = 0;
+ u32 *in;
+ int err;
+ u8 *mc;
+
+ ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS,
+ sizeof(*ft->g), GFP_KERNEL);
+ in = mlx5_vzalloc(inlen);
+ if (!in || !ft->g) {
+ kvfree(ft->g);
+ kvfree(in);
+ return -ENOMEM;
+ }
+
+ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+ outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc,
+ outer_headers);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ethertype);
+ switch (type) {
+ case ARFS_IPV4_TCP:
+ case ARFS_IPV6_TCP:
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport);
+ break;
+ case ARFS_IPV4_UDP:
+ case ARFS_IPV6_UDP:
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_sport);
+ break;
+ default:
+ err = -EINVAL;
+ goto out;
+ }
+
+ switch (type) {
+ case ARFS_IPV4_TCP:
+ case ARFS_IPV4_UDP:
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+ break;
+ case ARFS_IPV6_TCP:
+ case ARFS_IPV6_UDP:
+ memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ 0xff, 16);
+ memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ 0xff, 16);
+ break;
+ default:
+ err = -EINVAL;
+ goto out;
+ }
+
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5E_ARFS_GROUP1_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ memset(in, 0, inlen);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5E_ARFS_GROUP2_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err;
+ ft->num_groups++;
+
+ kvfree(in);
+ return 0;
+
+err:
+ err = PTR_ERR(ft->g[ft->num_groups]);
+ ft->g[ft->num_groups] = NULL;
+out:
+ kvfree(in);
+
+ return err;
+}
+
+static int arfs_create_table(struct mlx5e_priv *priv,
+ enum arfs_type type)
+{
+ struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
+ struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft;
+ int err;
+
+ ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
+ MLX5E_ARFS_TABLE_SIZE, MLX5E_ARFS_FT_LEVEL);
+ if (IS_ERR(ft->t)) {
+ err = PTR_ERR(ft->t);
+ ft->t = NULL;
+ return err;
+ }
+
+ err = arfs_create_groups(ft, type);
+ if (err)
+ goto err;
+
+ err = arfs_add_default_rule(priv, type);
+ if (err)
+ goto err;
+
+ return 0;
+err:
+ mlx5e_destroy_flow_table(ft);
+ return err;
+}
+
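arfs_create_table() builds the flow table, then the match groups, then the default rule, and unwinds through mlx5e_destroy_flow_table() if any later step fails; the same goto-to-a-cleanup-label shape recurs throughout this file. A toy illustration of the idiom, with invented step names:

#include <stdio.h>

static int step_a(void)
{
	printf("a ok\n");
	return 0;
}

static int step_b(void)
{
	return -1; /* force the error path */
}

static void undo_a(void)
{
	printf("undo a\n");
}

/* Each later failure jumps to a label that tears down what was built. */
static int create(void)
{
	int err;

	err = step_a();
	if (err)
		return err;

	err = step_b();
	if (err)
		goto err_a;

	return 0;

err_a:
	undo_a();
	return err;
}

int main(void)
{
	return create() ? 1 : 0;
}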
+int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
+{
+ int err = 0;
+ int i;
+
+ if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
+ return 0;
+
+ spin_lock_init(&priv->fs.arfs.arfs_lock);
+ INIT_LIST_HEAD(&priv->fs.arfs.rules);
+ priv->fs.arfs.wq = create_singlethread_workqueue("mlx5e_arfs");
+ if (!priv->fs.arfs.wq)
+ return -ENOMEM;
+
+ for (i = 0; i < ARFS_NUM_TYPES; i++) {
+ err = arfs_create_table(priv, i);
+ if (err)
+ goto err;
+ }
+ return 0;
+err:
+ mlx5e_arfs_destroy_tables(priv);
+ return err;
+}
+
+#define MLX5E_ARFS_EXPIRY_QUOTA 60
+
+static void arfs_may_expire_flow(struct mlx5e_priv *priv)
+{
+ struct arfs_rule *arfs_rule;
+ struct hlist_node *htmp;
+ int quota = 0;
+ int i;
+ int j;
+
+ HLIST_HEAD(del_list);
+ spin_lock_bh(&priv->fs.arfs.arfs_lock);
+ mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
+ if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
+ break;
+ if (!work_pending(&arfs_rule->arfs_work) &&
+ rps_may_expire_flow(priv->netdev,
+ arfs_rule->rxq, arfs_rule->flow_id,
+ arfs_rule->filter_id)) {
+ hlist_del_init(&arfs_rule->hlist);
+ hlist_add_head(&arfs_rule->hlist, &del_list);
+ }
+ }
+ spin_unlock_bh(&priv->fs.arfs.arfs_lock);
+ hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) {
+ if (arfs_rule->rule)
+ mlx5_del_flow_rule(arfs_rule->rule);
+ hlist_del(&arfs_rule->hlist);
+ kfree(arfs_rule);
+ }
+}
+
+static void arfs_del_rules(struct mlx5e_priv *priv)
+{
+ struct hlist_node *htmp;
+ struct arfs_rule *rule;
+ int i;
+ int j;
+
+ HLIST_HEAD(del_list);
+ spin_lock_bh(&priv->fs.arfs.arfs_lock);
+ mlx5e_for_each_arfs_rule(rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
+ hlist_del_init(&rule->hlist);
+ hlist_add_head(&rule->hlist, &del_list);
+ }
+ spin_unlock_bh(&priv->fs.arfs.arfs_lock);
+
+ hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) {
+ cancel_work_sync(&rule->arfs_work);
+ if (rule->rule)
+ mlx5_del_flow_rule(rule->rule);
+ hlist_del(&rule->hlist);
+ kfree(rule);
+ }
+}
+
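Both arfs_may_expire_flow() and arfs_del_rules() above unlink their victims onto a private del_list while holding the BH spinlock and only afterwards delete the hardware rules and free memory, keeping the slow teardown outside the lock. A compact userspace sketch of that steal-then-free idiom, with a mutex in place of the spinlock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct rule {
	int id;
	struct rule *next;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct rule *rules;

/* Steal the whole list under the lock; tear it down without the lock. */
static void del_all(void)
{
	struct rule *del_list, *r;

	pthread_mutex_lock(&lock);
	del_list = rules;
	rules = NULL;
	pthread_mutex_unlock(&lock);

	while ((r = del_list) != NULL) {
		del_list = r->next;
		printf("deleting rule %d\n", r->id);
		free(r);
	}
}

int main(void)
{
	struct rule *r = malloc(sizeof(*r));

	r->id = 1;
	r->next = NULL;
	rules = r;
	del_all();
	return 0;
}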
+static struct hlist_head *
+arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port,
+ __be16 dst_port)
+{
+ unsigned long l;
+ int bucket_idx;
+
+ l = (__force unsigned long)src_port |
+ ((__force unsigned long)dst_port << 2);
+
+ bucket_idx = hash_long(l, ARFS_HASH_SHIFT);
+
+ return &arfs_t->rules_hash[bucket_idx];
+}
+
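arfs_hash_bucket() above folds the two 16-bit ports into a single long and hashes it down to ARFS_HASH_SHIFT = 8 bits, selecting one of 256 buckets per table. A standalone sketch of the bucket selection, with a Knuth-style multiplicative hash standing in for the kernel's hash_long():

#include <stdint.h>
#include <stdio.h>

#define HASH_BITS 8 /* ARFS_HASH_SHIFT: 256 buckets per table */

/* Fold both ports into one key, reduce it to a bucket index. */
static unsigned int bucket(uint16_t src_port, uint16_t dst_port)
{
	unsigned long l = (unsigned long)src_port |
			  ((unsigned long)dst_port << 2);

	/* multiplicative stand-in for hash_long(l, HASH_BITS) */
	return (unsigned int)((l * 2654435761UL) >> (32 - HASH_BITS)) &
	       ((1U << HASH_BITS) - 1);
}

int main(void)
{
	printf("bucket=%u\n", bucket(12345, 443));
	return 0;
}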
+static u8 arfs_get_ip_proto(const struct sk_buff *skb)
+{
+ return (skb->protocol == htons(ETH_P_IP)) ?
+ ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
+}
+
+static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
+ u8 ip_proto, __be16 etype)
+{
+ if (etype == htons(ETH_P_IP) && ip_proto == IPPROTO_TCP)
+ return &arfs->arfs_tables[ARFS_IPV4_TCP];
+ if (etype == htons(ETH_P_IP) && ip_proto == IPPROTO_UDP)
+ return &arfs->arfs_tables[ARFS_IPV4_UDP];
+ if (etype == htons(ETH_P_IPV6) && ip_proto == IPPROTO_TCP)
+ return &arfs->arfs_tables[ARFS_IPV6_TCP];
+ if (etype == htons(ETH_P_IPV6) && ip_proto == IPPROTO_UDP)
+ return &arfs->arfs_tables[ARFS_IPV6_UDP];
+
+ return NULL;
+}
+
+static struct mlx5_flow_rule *arfs_add_rule(struct mlx5e_priv *priv,
+ struct arfs_rule *arfs_rule)
+{
+ struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
+ struct arfs_tuple *tuple = &arfs_rule->tuple;
+ struct mlx5_flow_rule *rule = NULL;
+ struct mlx5_flow_destination dest;
+ struct arfs_table *arfs_table;
+ struct mlx5_flow_spec *spec;
+ struct mlx5_flow_table *ft;
+ int err = 0;
+
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec) {
+ netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+ err = -ENOMEM;
+ goto out;
+ }
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.ethertype);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype,
+ ntohs(tuple->etype));
+ arfs_table = arfs_get_table(arfs, tuple->ip_proto, tuple->etype);
+ if (!arfs_table) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ ft = arfs_table->ft.t;
+ if (tuple->ip_proto == IPPROTO_TCP) {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.tcp_dport);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.tcp_sport);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_dport,
+ ntohs(tuple->dst_port));
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_sport,
+ ntohs(tuple->src_port));
+ } else {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.udp_dport);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.udp_sport);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport,
+ ntohs(tuple->dst_port));
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_sport,
+ ntohs(tuple->src_port));
+ }
+ if (tuple->etype == htons(ETH_P_IP)) {
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
+ &tuple->src_ipv4,
+ 4);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+ &tuple->dst_ipv4,
+ 4);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
+ } else {
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ &tuple->src_ipv6,
+ 16);
+ memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ &tuple->dst_ipv6,
+ 16);
+ memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ 0xff,
+ 16);
+ memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ 0xff,
+ 16);
+ }
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ dest.tir_num = priv->direct_tir[arfs_rule->rxq].tirn;
+ rule = mlx5_add_flow_rule(ft, spec, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ MLX5_FS_DEFAULT_FLOW_TAG,
+ &dest);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(priv->netdev, "%s: add rule(filter id=%d, rq idx=%d) failed, err=%d\n",
+ __func__, arfs_rule->filter_id, arfs_rule->rxq, err);
+ }
+
+out:
+ kvfree(spec);
+ return err ? ERR_PTR(err) : rule;
+}
+
+static void arfs_modify_rule_rq(struct mlx5e_priv *priv,
+ struct mlx5_flow_rule *rule, u16 rxq)
+{
+ struct mlx5_flow_destination dst;
+ int err = 0;
+
+ dst.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ dst.tir_num = priv->direct_tir[rxq].tirn;
+ err = mlx5_modify_rule_destination(rule, &dst);
+ if (err)
+ netdev_warn(priv->netdev,
+ "Failed to modfiy aRFS rule destination to rq=%d\n", rxq);
+}
+
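+/* Work handler: runs in process context, so it may take state_lock and
+ * program the HW. Adds the rule on first use, or re-steers an existing
+ * rule whose flow moved to another RQ.
+ */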
+static void arfs_handle_work(struct work_struct *work)
+{
+ struct arfs_rule *arfs_rule = container_of(work,
+ struct arfs_rule,
+ arfs_work);
+ struct mlx5e_priv *priv = arfs_rule->priv;
+ struct mlx5_flow_rule *rule;
+
+ mutex_lock(&priv->state_lock);
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ spin_lock_bh(&priv->fs.arfs.arfs_lock);
+ hlist_del(&arfs_rule->hlist);
+ spin_unlock_bh(&priv->fs.arfs.arfs_lock);
+
+ mutex_unlock(&priv->state_lock);
+ kfree(arfs_rule);
+ goto out;
+ }
+ mutex_unlock(&priv->state_lock);
+
+ if (!arfs_rule->rule) {
+ rule = arfs_add_rule(priv, arfs_rule);
+ if (IS_ERR(rule))
+ goto out;
+ arfs_rule->rule = rule;
+ } else {
+ arfs_modify_rule_rq(priv, arfs_rule->rule,
+ arfs_rule->rxq);
+ }
+out:
+ arfs_may_expire_flow(priv);
+}
+
+/* return L4 destination port from ip4/6 packets */
+static __be16 arfs_get_dst_port(const struct sk_buff *skb)
+{
+ char *transport_header;
+
+ transport_header = skb_transport_header(skb);
+ if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
+ return ((struct tcphdr *)transport_header)->dest;
+ return ((struct udphdr *)transport_header)->dest;
+}
+
+/* return L4 source port from ip4/6 packets */
+static __be16 arfs_get_src_port(const struct sk_buff *skb)
+{
+ char *transport_header;
+
+ transport_header = skb_transport_header(skb);
+ if (arfs_get_ip_proto(skb) == IPPROTO_TCP)
+ return ((struct tcphdr *)transport_header)->source;
+ return ((struct udphdr *)transport_header)->source;
+}
+
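+/* Called under arfs_lock from the RX steering path, hence GFP_ATOMIC;
+ * HW programming is deferred to arfs_handle_work().
+ */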
+static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
+ struct arfs_table *arfs_t,
+ const struct sk_buff *skb,
+ u16 rxq, u32 flow_id)
+{
+ struct arfs_rule *rule;
+ struct arfs_tuple *tuple;
+
+ rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
+ if (!rule)
+ return NULL;
+
+ rule->priv = priv;
+ rule->rxq = rxq;
+ INIT_WORK(&rule->arfs_work, arfs_handle_work);
+
+ tuple = &rule->tuple;
+ tuple->etype = skb->protocol;
+ if (tuple->etype == htons(ETH_P_IP)) {
+ tuple->src_ipv4 = ip_hdr(skb)->saddr;
+ tuple->dst_ipv4 = ip_hdr(skb)->daddr;
+ } else {
+ memcpy(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
+ sizeof(struct in6_addr));
+ memcpy(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
+ sizeof(struct in6_addr));
+ }
+ tuple->ip_proto = arfs_get_ip_proto(skb);
+ tuple->src_port = arfs_get_src_port(skb);
+ tuple->dst_port = arfs_get_dst_port(skb);
+
+ rule->flow_id = flow_id;
+ rule->filter_id = priv->fs.arfs.last_filter_id++ % RPS_NO_FILTER;
+
+ hlist_add_head(&rule->hlist,
+ arfs_hash_bucket(arfs_t, tuple->src_port,
+ tuple->dst_port));
+ return rule;
+}
+
+static bool arfs_cmp_ips(struct arfs_tuple *tuple,
+ const struct sk_buff *skb)
+{
+ if (tuple->etype == htons(ETH_P_IP) &&
+ tuple->src_ipv4 == ip_hdr(skb)->saddr &&
+ tuple->dst_ipv4 == ip_hdr(skb)->daddr)
+ return true;
+ if (tuple->etype == htons(ETH_P_IPV6) &&
+ (!memcmp(&tuple->src_ipv6, &ipv6_hdr(skb)->saddr,
+ sizeof(struct in6_addr))) &&
+ (!memcmp(&tuple->dst_ipv6, &ipv6_hdr(skb)->daddr,
+ sizeof(struct in6_addr))))
+ return true;
+ return false;
+}
+
+static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t,
+ const struct sk_buff *skb)
+{
+ struct arfs_rule *arfs_rule;
+ struct hlist_head *head;
+ __be16 src_port = arfs_get_src_port(skb);
+ __be16 dst_port = arfs_get_dst_port(skb);
+
+ head = arfs_hash_bucket(arfs_t, src_port, dst_port);
+ hlist_for_each_entry(arfs_rule, head, hlist) {
+ if (arfs_rule->tuple.src_port == src_port &&
+ arfs_rule->tuple.dst_port == dst_port &&
+ arfs_cmp_ips(&arfs_rule->tuple, skb)) {
+ return arfs_rule;
+ }
+ }
+
+ return NULL;
+}
+
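+/* ndo_rx_flow_steer() entry point: find or allocate the rule for this
+ * skb's flow, queue work to program the HW, and return the filter id
+ * that rps_may_expire_flow() will later be asked about.
+ */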
+int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
+ struct arfs_table *arfs_t;
+ struct arfs_rule *arfs_rule;
+
+ if (skb->protocol != htons(ETH_P_IP) &&
+ skb->protocol != htons(ETH_P_IPV6))
+ return -EPROTONOSUPPORT;
+
+ arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
+ if (!arfs_t)
+ return -EPROTONOSUPPORT;
+
+ spin_lock_bh(&arfs->arfs_lock);
+ arfs_rule = arfs_find_rule(arfs_t, skb);
+ if (arfs_rule) {
+ if (arfs_rule->rxq == rxq_index) {
+ spin_unlock_bh(&arfs->arfs_lock);
+ return arfs_rule->filter_id;
+ }
+ arfs_rule->rxq = rxq_index;
+ } else {
+ arfs_rule = arfs_alloc_rule(priv, arfs_t, skb,
+ rxq_index, flow_id);
+ if (!arfs_rule) {
+ spin_unlock_bh(&arfs->arfs_lock);
+ return -ENOMEM;
+ }
+ }
+ queue_work(priv->fs.arfs.wq, &arfs_rule->arfs_work);
+ spin_unlock_bh(&arfs->arfs_lock);
+ return arfs_rule->filter_id;
+}
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
index 2018eebe1531..847a8f3ac2b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
@@ -93,6 +93,8 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
/* RX HW timestamp */
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
+ /* Reset CQE compression to Admin default */
+ mlx5e_modify_rx_cqe_compression(priv, priv->params.rx_cqe_compress_admin);
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_SOME:
@@ -108,6 +110,8 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_SYNC:
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ /* Disable CQE compression */
+ mlx5e_modify_rx_cqe_compression(priv, false);
config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
new file mode 100644
index 000000000000..9cce153e1035
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "en.h"
+
+/* mlx5e global resources should be placed in this file.
+ * Global resources are common to all the netdevices created on the same NIC.
+ */
+
+int mlx5e_create_tir(struct mlx5_core_dev *mdev,
+ struct mlx5e_tir *tir, u32 *in, int inlen)
+{
+ int err;
+
+ err = mlx5_core_create_tir(mdev, in, inlen, &tir->tirn);
+ if (err)
+ return err;
+
+ list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list);
+
+ return 0;
+}
+
+void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
+ struct mlx5e_tir *tir)
+{
+ mlx5_core_destroy_tir(mdev, tir->tirn);
+ list_del(&tir->list);
+}
+
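+/* Create a physical-address mkey spanning the whole address space
+ * (MLX5_MKEY_LEN64), shared by the netdevs on this device.
+ */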
+static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
+ struct mlx5_core_mkey *mkey)
+{
+ struct mlx5_create_mkey_mbox_in *in;
+ int err;
+
+ in = mlx5_vzalloc(sizeof(*in));
+ if (!in)
+ return -ENOMEM;
+
+ in->seg.flags = MLX5_PERM_LOCAL_WRITE |
+ MLX5_PERM_LOCAL_READ |
+ MLX5_ACCESS_MODE_PA;
+ in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
+ in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
+
+ err = mlx5_core_create_mkey(mdev, mkey, in, sizeof(*in), NULL, NULL,
+ NULL);
+
+ kvfree(in);
+
+ return err;
+}
+
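+/* Allocate the per-device resources shared by the netdevs: CQ UAR, PD,
+ * transport domain and mkey, unwound in reverse order on failure.
+ */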
+int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
+{
+ struct mlx5e_resources *res = &mdev->mlx5e_res;
+ int err;
+
+ err = mlx5_alloc_map_uar(mdev, &res->cq_uar, false);
+ if (err) {
+ mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
+ return err;
+ }
+
+ err = mlx5_core_alloc_pd(mdev, &res->pdn);
+ if (err) {
+ mlx5_core_err(mdev, "alloc pd failed, %d\n", err);
+ goto err_unmap_free_uar;
+ }
+
+ err = mlx5_core_alloc_transport_domain(mdev, &res->td.tdn);
+ if (err) {
+ mlx5_core_err(mdev, "alloc td failed, %d\n", err);
+ goto err_dealloc_pd;
+ }
+
+ err = mlx5e_create_mkey(mdev, res->pdn, &res->mkey);
+ if (err) {
+ mlx5_core_err(mdev, "create mkey failed, %d\n", err);
+ goto err_dealloc_transport_domain;
+ }
+
+ INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list);
+
+ return 0;
+
+err_dealloc_transport_domain:
+ mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
+err_dealloc_pd:
+ mlx5_core_dealloc_pd(mdev, res->pdn);
+err_unmap_free_uar:
+ mlx5_unmap_free_uar(mdev, &res->cq_uar);
+
+ return err;
+}
+
+void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
+{
+ struct mlx5e_resources *res = &mdev->mlx5e_res;
+
+ mlx5_core_destroy_mkey(mdev, &res->mkey);
+ mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
+ mlx5_core_dealloc_pd(mdev, res->pdn);
+ mlx5_unmap_free_uar(mdev, &res->cq_uar);
+}
+
+int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev)
+{
+ struct mlx5e_tir *tir;
+ void *in;
+ int inlen;
+ int err = 0;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
+
+ list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
+ err = mlx5_core_modify_tir(mdev, tir->tirn, in, inlen);
+ if (err)
+ goto out;
+ }
+
+out:
+ kvfree(in);
+
+ return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 3036f279a8fd..762af16ed021 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -96,7 +96,7 @@ static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
break;
case IEEE_8021QAZ_TSA_ETS:
- tc_tx_bw[i] = ets->tc_tx_bw[i] ?: MLX5E_MIN_BW_ALLOC;
+ tc_tx_bw[i] = ets->tc_tx_bw[i];
break;
}
}
@@ -127,25 +127,40 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
return mlx5_set_port_tc_bw_alloc(mdev, tc_tx_bw);
}
-static int mlx5e_dbcnl_validate_ets(struct ieee_ets *ets)
+static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
+ struct ieee_ets *ets)
{
int bw_sum = 0;
int i;
/* Validate Priority */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY)
+ if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY) {
+ netdev_err(netdev,
+ "Failed to validate ETS: priority value greater than max(%d)\n",
+ MLX5E_MAX_PRIORITY);
return -EINVAL;
+ }
}
/* Validate Bandwidth Sum */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS)
+ if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
+ if (!ets->tc_tx_bw[i]) {
+ netdev_err(netdev,
+ "Failed to validate ETS: BW 0 is illegal\n");
+ return -EINVAL;
+ }
+
bw_sum += ets->tc_tx_bw[i];
+ }
}
- if (bw_sum != 0 && bw_sum != 100)
+ if (bw_sum != 0 && bw_sum != 100) {
+ netdev_err(netdev,
+ "Failed to validate ETS: BW sum is illegal\n");
return -EINVAL;
+ }
return 0;
}
@@ -155,7 +170,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
- err = mlx5e_dbcnl_validate_ets(ets);
+ err = mlx5e_dbcnl_validate_ets(netdev, ets);
if (err)
return err;
@@ -174,8 +189,14 @@ static int mlx5e_dcbnl_ieee_getpfc(struct net_device *dev,
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+ int i;
pfc->pfc_cap = mlx5_max_tc(mdev) + 1;
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ pfc->requests[i] = PPORT_PER_PRIO_GET(pstats, i, tx_pause);
+ pfc->indications[i] = PPORT_PER_PRIO_GET(pstats, i, rx_pause);
+ }
return mlx5_query_port_pfc(mdev, &pfc->pfc_en, NULL);
}
@@ -185,7 +206,6 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
- enum mlx5_port_status ps;
u8 curr_pfc_en;
int ret;
@@ -194,14 +214,8 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
if (pfc->pfc_en == curr_pfc_en)
return 0;
- mlx5_query_port_admin_status(mdev, &ps);
- if (ps == MLX5_PORT_UP)
- mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
-
ret = mlx5_set_port_pfc(mdev, pfc->pfc_en, pfc->pfc_en);
-
- if (ps == MLX5_PORT_UP)
- mlx5_set_port_admin_status(mdev, MLX5_PORT_UP);
+ mlx5_toggle_port_link(mdev);
return ret;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 3476ab844634..7a346bb2ed00 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -48,177 +48,229 @@ static void mlx5e_get_drvinfo(struct net_device *dev,
sizeof(drvinfo->bus_info));
}
-static const struct {
- u32 supported;
- u32 advertised;
+struct ptys2ethtool_config {
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertised);
u32 speed;
-} ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER] = {
- [MLX5E_1000BASE_CX_SGMII] = {
- .supported = SUPPORTED_1000baseKX_Full,
- .advertised = ADVERTISED_1000baseKX_Full,
- .speed = 1000,
- },
- [MLX5E_1000BASE_KX] = {
- .supported = SUPPORTED_1000baseKX_Full,
- .advertised = ADVERTISED_1000baseKX_Full,
- .speed = 1000,
- },
- [MLX5E_10GBASE_CX4] = {
- .supported = SUPPORTED_10000baseKX4_Full,
- .advertised = ADVERTISED_10000baseKX4_Full,
- .speed = 10000,
- },
- [MLX5E_10GBASE_KX4] = {
- .supported = SUPPORTED_10000baseKX4_Full,
- .advertised = ADVERTISED_10000baseKX4_Full,
- .speed = 10000,
- },
- [MLX5E_10GBASE_KR] = {
- .supported = SUPPORTED_10000baseKR_Full,
- .advertised = ADVERTISED_10000baseKR_Full,
- .speed = 10000,
- },
- [MLX5E_20GBASE_KR2] = {
- .supported = SUPPORTED_20000baseKR2_Full,
- .advertised = ADVERTISED_20000baseKR2_Full,
- .speed = 20000,
- },
- [MLX5E_40GBASE_CR4] = {
- .supported = SUPPORTED_40000baseCR4_Full,
- .advertised = ADVERTISED_40000baseCR4_Full,
- .speed = 40000,
- },
- [MLX5E_40GBASE_KR4] = {
- .supported = SUPPORTED_40000baseKR4_Full,
- .advertised = ADVERTISED_40000baseKR4_Full,
- .speed = 40000,
- },
- [MLX5E_56GBASE_R4] = {
- .supported = SUPPORTED_56000baseKR4_Full,
- .advertised = ADVERTISED_56000baseKR4_Full,
- .speed = 56000,
- },
- [MLX5E_10GBASE_CR] = {
- .supported = SUPPORTED_10000baseKR_Full,
- .advertised = ADVERTISED_10000baseKR_Full,
- .speed = 10000,
- },
- [MLX5E_10GBASE_SR] = {
- .supported = SUPPORTED_10000baseKR_Full,
- .advertised = ADVERTISED_10000baseKR_Full,
- .speed = 10000,
- },
- [MLX5E_10GBASE_ER] = {
- .supported = SUPPORTED_10000baseKR_Full,
- .advertised = ADVERTISED_10000baseKR_Full,
- .speed = 10000,
- },
- [MLX5E_40GBASE_SR4] = {
- .supported = SUPPORTED_40000baseSR4_Full,
- .advertised = ADVERTISED_40000baseSR4_Full,
- .speed = 40000,
- },
- [MLX5E_40GBASE_LR4] = {
- .supported = SUPPORTED_40000baseLR4_Full,
- .advertised = ADVERTISED_40000baseLR4_Full,
- .speed = 40000,
- },
- [MLX5E_100GBASE_CR4] = {
- .speed = 100000,
- },
- [MLX5E_100GBASE_SR4] = {
- .speed = 100000,
- },
- [MLX5E_100GBASE_KR4] = {
- .speed = 100000,
- },
- [MLX5E_100GBASE_LR4] = {
- .speed = 100000,
- },
- [MLX5E_100BASE_TX] = {
- .speed = 100,
- },
- [MLX5E_1000BASE_T] = {
- .supported = SUPPORTED_1000baseT_Full,
- .advertised = ADVERTISED_1000baseT_Full,
- .speed = 1000,
- },
- [MLX5E_10GBASE_T] = {
- .supported = SUPPORTED_10000baseT_Full,
- .advertised = ADVERTISED_10000baseT_Full,
- .speed = 1000,
- },
- [MLX5E_25GBASE_CR] = {
- .speed = 25000,
- },
- [MLX5E_25GBASE_KR] = {
- .speed = 25000,
- },
- [MLX5E_25GBASE_SR] = {
- .speed = 25000,
- },
- [MLX5E_50GBASE_CR2] = {
- .speed = 50000,
- },
- [MLX5E_50GBASE_KR2] = {
- .speed = 50000,
- },
};
+static struct ptys2ethtool_config ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER];
+
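+/* The ksettings link-mode masks are bitmaps, which cannot be filled in
+ * by a static initializer, so the table is populated at init time by
+ * mlx5e_build_ptys2ethtool_map().
+ */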
+#define MLX5_BUILD_PTYS2ETHTOOL_CONFIG(reg_, speed_, ...) \
+ ({ \
+ struct ptys2ethtool_config *cfg; \
+ const unsigned int modes[] = { __VA_ARGS__ }; \
+ unsigned int i; \
+ cfg = &ptys2ethtool_table[reg_]; \
+ cfg->speed = speed_; \
+ bitmap_zero(cfg->supported, \
+ __ETHTOOL_LINK_MODE_MASK_NBITS); \
+ bitmap_zero(cfg->advertised, \
+ __ETHTOOL_LINK_MODE_MASK_NBITS); \
+ for (i = 0 ; i < ARRAY_SIZE(modes) ; ++i) { \
+ __set_bit(modes[i], cfg->supported); \
+ __set_bit(modes[i], cfg->advertised); \
+ } \
+ })
+
+void mlx5e_build_ptys2ethtool_map(void)
+{
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_CX_SGMII, SPEED_1000,
+ ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_1000BASE_KX, SPEED_1000,
+ ETHTOOL_LINK_MODE_1000baseKX_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CX4, SPEED_10000,
+ ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KX4, SPEED_10000,
+ ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_KR, SPEED_10000,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_20GBASE_KR2, SPEED_20000,
+ ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_CR4, SPEED_40000,
+ ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_KR4, SPEED_40000,
+ ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_56GBASE_R4, SPEED_56000,
+ ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_CR, SPEED_10000,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_SR, SPEED_10000,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_ER, SPEED_10000,
+ ETHTOOL_LINK_MODE_10000baseKR_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_SR4, SPEED_40000,
+ ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_40GBASE_LR4, SPEED_40000,
+ ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_SR2, SPEED_50000,
+ ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_CR4, SPEED_100000,
+ ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_SR4, SPEED_100000,
+ ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_KR4, SPEED_100000,
+ ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_100GBASE_LR4, SPEED_100000,
+ ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_10GBASE_T, SPEED_10000,
+ ETHTOOL_LINK_MODE_10000baseT_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_CR, SPEED_25000,
+ ETHTOOL_LINK_MODE_25000baseCR_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_KR, SPEED_25000,
+ ETHTOOL_LINK_MODE_25000baseKR_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_25GBASE_SR, SPEED_25000,
+ ETHTOOL_LINK_MODE_25000baseSR_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_CR2, SPEED_50000,
+ ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT);
+ MLX5_BUILD_PTYS2ETHTOOL_CONFIG(MLX5E_50GBASE_KR2, SPEED_50000,
+ ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT);
+}
+
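+/* Bitmap of priorities with PFC enabled in either direction, or 0 if
+ * the query fails.
+ */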
+static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u8 pfc_en_tx;
+ u8 pfc_en_rx;
+ int err;
+
+ err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);
+
+ return err ? 0 : pfc_en_tx | pfc_en_rx;
+}
+
+static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 rx_pause;
+ u32 tx_pause;
+ int err;
+
+ err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
+
+ return err ? false : rx_pause | tx_pause;
+}
+
+#define MLX5E_NUM_Q_CNTRS(priv) (NUM_Q_COUNTERS * (!!priv->q_counter))
+#define MLX5E_NUM_RQ_STATS(priv) \
+ (NUM_RQ_STATS * priv->params.num_channels * \
+ test_bit(MLX5E_STATE_OPENED, &priv->state))
+#define MLX5E_NUM_SQ_STATS(priv) \
+ (NUM_SQ_STATS * priv->params.num_channels * priv->params.num_tc * \
+ test_bit(MLX5E_STATE_OPENED, &priv->state))
+#define MLX5E_NUM_PFC_COUNTERS(priv) \
+ ((mlx5e_query_global_pause_combined(priv) + hweight8(mlx5e_query_pfc_combined(priv))) * \
+ NUM_PPORT_PER_PRIO_PFC_COUNTERS)
+
static int mlx5e_get_sset_count(struct net_device *dev, int sset)
{
struct mlx5e_priv *priv = netdev_priv(dev);
switch (sset) {
case ETH_SS_STATS:
- return NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS +
- priv->params.num_channels * NUM_RQ_STATS +
- priv->params.num_channels * priv->params.num_tc *
- NUM_SQ_STATS;
+ return NUM_SW_COUNTERS +
+ MLX5E_NUM_Q_CNTRS(priv) +
+ NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS +
+ MLX5E_NUM_RQ_STATS(priv) +
+ MLX5E_NUM_SQ_STATS(priv) +
+ MLX5E_NUM_PFC_COUNTERS(priv);
+ case ETH_SS_PRIV_FLAGS:
+ return ARRAY_SIZE(mlx5e_priv_flags);
/* fallthrough */
default:
return -EOPNOTSUPP;
}
}
+static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
+{
+ int i, j, tc, prio, idx = 0;
+ unsigned long pfc_combined;
+
+ /* SW counters */
+ for (i = 0; i < NUM_SW_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
+
+ /* Q counters */
+ for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].format);
+
+ /* VPORT counters */
+ for (i = 0; i < NUM_VPORT_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ vport_stats_desc[i].format);
+
+ /* PPORT counters */
+ for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pport_802_3_stats_desc[i].format);
+
+ for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pport_2863_stats_desc[i].format);
+
+ for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pport_2819_stats_desc[i].format);
+
+ for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ pport_per_prio_traffic_stats_desc[i].format, prio);
+ }
+
+ pfc_combined = mlx5e_query_pfc_combined(priv);
+ for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
+ char pfc_string[ETH_GSTRING_LEN];
+
+ snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ pport_per_prio_pfc_stats_desc[i].format, pfc_string);
+ }
+ }
+
+ if (mlx5e_query_global_pause_combined(priv)) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ pport_per_prio_pfc_stats_desc[i].format, "global");
+ }
+ }
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return;
+
+ /* per channel counters */
+ for (i = 0; i < priv->params.num_channels; i++)
+ for (j = 0; j < NUM_RQ_STATS; j++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ rq_stats_desc[j].format, i);
+
+ for (tc = 0; tc < priv->params.num_tc; tc++)
+ for (i = 0; i < priv->params.num_channels; i++)
+ for (j = 0; j < NUM_SQ_STATS; j++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ sq_stats_desc[j].format,
+ priv->channeltc_to_txq_map[i][tc]);
+}
+
static void mlx5e_get_strings(struct net_device *dev,
uint32_t stringset, uint8_t *data)
{
- int i, j, tc, idx = 0;
struct mlx5e_priv *priv = netdev_priv(dev);
+ int i;
switch (stringset) {
case ETH_SS_PRIV_FLAGS:
+ for (i = 0; i < ARRAY_SIZE(mlx5e_priv_flags); i++)
+ strcpy(data + i * ETH_GSTRING_LEN, mlx5e_priv_flags[i]);
break;
case ETH_SS_TEST:
break;
case ETH_SS_STATS:
- /* VPORT counters */
- for (i = 0; i < NUM_VPORT_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- vport_strings[i]);
-
- /* PPORT counters */
- for (i = 0; i < NUM_PPORT_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_strings[i]);
-
- /* per channel counters */
- for (i = 0; i < priv->params.num_channels; i++)
- for (j = 0; j < NUM_RQ_STATS; j++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- "rx%d_%s", i, rq_stats_strings[j]);
-
- for (tc = 0; tc < priv->params.num_tc; tc++)
- for (i = 0; i < priv->params.num_channels; i++)
- for (j = 0; j < NUM_SQ_STATS; j++)
- sprintf(data +
- (idx++) * ETH_GSTRING_LEN,
- "tx%d_%s",
- priv->channeltc_to_txq_map[i][tc],
- sq_stats_strings[j]);
+ mlx5e_fill_stats_strings(priv, data);
break;
}
}
@@ -227,7 +279,8 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- int i, j, tc, idx = 0;
+ int i, j, tc, prio, idx = 0;
+ unsigned long pfc_combined;
if (!data)
return;
@@ -237,35 +290,123 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
mlx5e_update_stats(priv);
mutex_unlock(&priv->state_lock);
+ for (i = 0; i < NUM_SW_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
+ sw_stats_desc, i);
+
+ for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++)
+ data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
+ q_stats_desc, i);
+
for (i = 0; i < NUM_VPORT_COUNTERS; i++)
- data[idx++] = ((u64 *)&priv->stats.vport)[i];
+ data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
+ vport_stats_desc, i);
+
+ for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
+ pport_802_3_stats_desc, i);
+
+ for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
+ pport_2863_stats_desc, i);
+
+ for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
+ pport_2819_stats_desc, i);
+
+ for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
+ pport_per_prio_traffic_stats_desc, i);
+ }
- for (i = 0; i < NUM_PPORT_COUNTERS; i++)
- data[idx++] = be64_to_cpu(((__be64 *)&priv->stats.pport)[i]);
+ pfc_combined = mlx5e_query_pfc_combined(priv);
+ for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
+ data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
+ pport_per_prio_pfc_stats_desc, i);
+ }
+ }
+
+ if (mlx5e_query_global_pause_combined(priv)) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
+ data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
+ pport_per_prio_pfc_stats_desc, i);
+ }
+ }
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return;
/* per channel counters */
for (i = 0; i < priv->params.num_channels; i++)
for (j = 0; j < NUM_RQ_STATS; j++)
- data[idx++] = !test_bit(MLX5E_STATE_OPENED,
- &priv->state) ? 0 :
- ((u64 *)&priv->channel[i]->rq.stats)[j];
+ data[idx++] =
+ MLX5E_READ_CTR64_CPU(&priv->channel[i]->rq.stats,
+ rq_stats_desc, j);
for (tc = 0; tc < priv->params.num_tc; tc++)
for (i = 0; i < priv->params.num_channels; i++)
for (j = 0; j < NUM_SQ_STATS; j++)
- data[idx++] = !test_bit(MLX5E_STATE_OPENED,
- &priv->state) ? 0 :
- ((u64 *)&priv->channel[i]->sq[tc].stats)[j];
+ data[idx++] = MLX5E_READ_CTR64_CPU(&priv->channel[i]->sq[tc].stats,
+ sq_stats_desc, j);
+}
+
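+/* A striding RQ WQE holds many packets, so ethtool ring sizes are
+ * reported in packets: estimate the packet capacity of num_wqe WQEs,
+ * rounded to a power of two.
+ */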
+static u32 mlx5e_rx_wqes_to_packets(struct mlx5e_priv *priv, int rq_wq_type,
+ int num_wqe)
+{
+ int packets_per_wqe;
+ int stride_size;
+ int num_strides;
+ int wqe_size;
+
+ if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
+ return num_wqe;
+
+ stride_size = 1 << priv->params.mpwqe_log_stride_sz;
+ num_strides = 1 << priv->params.mpwqe_log_num_strides;
+ wqe_size = stride_size * num_strides;
+
+ packets_per_wqe = wqe_size /
+ ALIGN(ETH_DATA_LEN, stride_size);
+ return (1 << (order_base_2(num_wqe * packets_per_wqe) - 1));
+}
+
+static u32 mlx5e_packets_to_rx_wqes(struct mlx5e_priv *priv, int rq_wq_type,
+ int num_packets)
+{
+ int packets_per_wqe;
+ int stride_size;
+ int num_strides;
+ int wqe_size;
+ int num_wqes;
+
+ if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
+ return num_packets;
+
+ stride_size = 1 << priv->params.mpwqe_log_stride_sz;
+ num_strides = 1 << priv->params.mpwqe_log_num_strides;
+ wqe_size = stride_size * num_strides;
+
+ num_packets = (1 << order_base_2(num_packets));
+
+ packets_per_wqe = wqe_size /
+ ALIGN(ETH_DATA_LEN, stride_size);
+ num_wqes = DIV_ROUND_UP(num_packets, packets_per_wqe);
+ return 1 << (order_base_2(num_wqes));
}
static void mlx5e_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *param)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ int rq_wq_type = priv->params.rq_wq_type;
- param->rx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
+ param->rx_max_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
+ 1 << mlx5_max_log_rq_size(rq_wq_type));
param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
- param->rx_pending = 1 << priv->params.log_rq_size;
+ param->rx_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
+ 1 << priv->params.log_rq_size);
param->tx_pending = 1 << priv->params.log_sq_size;
}
@@ -274,9 +415,14 @@ static int mlx5e_set_ringparam(struct net_device *dev,
{
struct mlx5e_priv *priv = netdev_priv(dev);
bool was_opened;
+ int rq_wq_type = priv->params.rq_wq_type;
+ u32 rx_pending_wqes;
+ u32 min_rq_size;
+ u32 max_rq_size;
u16 min_rx_wqes;
u8 log_rq_size;
u8 log_sq_size;
+ u32 num_mtts;
int err = 0;
if (param->rx_jumbo_pending) {
@@ -289,18 +435,36 @@ static int mlx5e_set_ringparam(struct net_device *dev,
__func__);
return -EINVAL;
}
- if (param->rx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) {
+
+ min_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
+ 1 << mlx5_min_log_rq_size(rq_wq_type));
+ max_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
+ 1 << mlx5_max_log_rq_size(rq_wq_type));
+ rx_pending_wqes = mlx5e_packets_to_rx_wqes(priv, rq_wq_type,
+ param->rx_pending);
+
+ if (param->rx_pending < min_rq_size) {
netdev_info(dev, "%s: rx_pending (%d) < min (%d)\n",
__func__, param->rx_pending,
- 1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE);
+ min_rq_size);
return -EINVAL;
}
- if (param->rx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE)) {
+ if (param->rx_pending > max_rq_size) {
netdev_info(dev, "%s: rx_pending (%d) > max (%d)\n",
__func__, param->rx_pending,
- 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE);
+ max_rq_size);
+ return -EINVAL;
+ }
+
+ num_mtts = MLX5E_REQUIRED_MTTS(priv->params.num_channels,
+ rx_pending_wqes);
+ if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
+ !MLX5E_VALID_NUM_MTTS(num_mtts)) {
+ netdev_info(dev, "%s: rx_pending (%d) request can't be satisfied, try to reduce.\n",
+ __func__, param->rx_pending);
return -EINVAL;
}
+
if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
netdev_info(dev, "%s: tx_pending (%d) < min (%d)\n",
__func__, param->tx_pending,
@@ -314,10 +478,9 @@ static int mlx5e_set_ringparam(struct net_device *dev,
return -EINVAL;
}
- log_rq_size = order_base_2(param->rx_pending);
+ log_rq_size = order_base_2(rx_pending_wqes);
log_sq_size = order_base_2(param->tx_pending);
- min_rx_wqes = min_t(u16, param->rx_pending - 1,
- MLX5E_PARAMS_DEFAULT_MIN_RX_WQES);
+ min_rx_wqes = mlx5_min_rx_wqes(rq_wq_type, rx_pending_wqes);
if (log_rq_size == priv->params.log_rq_size &&
log_sq_size == priv->params.log_sq_size &&
@@ -357,7 +520,9 @@ static int mlx5e_set_channels(struct net_device *dev,
struct mlx5e_priv *priv = netdev_priv(dev);
int ncv = mlx5e_get_max_num_channels(priv->mdev);
unsigned int count = ch->combined_count;
+ bool arfs_enabled;
bool was_opened;
+ u32 num_mtts;
int err = 0;
if (!count) {
@@ -376,6 +541,14 @@ static int mlx5e_set_channels(struct net_device *dev,
return -EINVAL;
}
+ num_mtts = MLX5E_REQUIRED_MTTS(count, BIT(priv->params.log_rq_size));
+ if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
+ !MLX5E_VALID_NUM_MTTS(num_mtts)) {
+ netdev_info(dev, "%s: rx count (%d) request can't be satisfied, try to reduce.\n",
+ __func__, count);
+ return -EINVAL;
+ }
+
if (priv->params.num_channels == count)
return 0;
@@ -385,13 +558,27 @@ static int mlx5e_set_channels(struct net_device *dev,
if (was_opened)
mlx5e_close_locked(dev);
+ arfs_enabled = dev->features & NETIF_F_NTUPLE;
+ if (arfs_enabled)
+ mlx5e_arfs_disable(priv);
+
priv->params.num_channels = count;
- mlx5e_build_default_indir_rqt(priv->params.indirection_rqt,
+ mlx5e_build_default_indir_rqt(priv->mdev, priv->params.indirection_rqt,
MLX5E_INDIR_RQT_SIZE, count);
if (was_opened)
err = mlx5e_open_locked(dev);
+ if (err)
+ goto out;
+ if (arfs_enabled) {
+ err = mlx5e_arfs_enable(priv);
+ if (err)
+ netdev_err(dev, "%s: mlx5e_arfs_enable failed: %d\n",
+ __func__, err);
+ }
+
+out:
mutex_unlock(&priv->state_lock);
return err;
@@ -405,10 +592,11 @@ static int mlx5e_get_coalesce(struct net_device *netdev,
if (!MLX5_CAP_GEN(priv->mdev, cq_moderation))
return -ENOTSUPP;
- coal->rx_coalesce_usecs = priv->params.rx_cq_moderation_usec;
- coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation_pkts;
- coal->tx_coalesce_usecs = priv->params.tx_cq_moderation_usec;
- coal->tx_max_coalesced_frames = priv->params.tx_cq_moderation_pkts;
+ coal->rx_coalesce_usecs = priv->params.rx_cq_moderation.usec;
+ coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation.pkts;
+ coal->tx_coalesce_usecs = priv->params.tx_cq_moderation.usec;
+ coal->tx_max_coalesced_frames = priv->params.tx_cq_moderation.pkts;
+ coal->use_adaptive_rx_coalesce = priv->params.rx_am_enabled;
return 0;
}
@@ -419,6 +607,10 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_channel *c;
+ bool restart =
+ !!coal->use_adaptive_rx_coalesce != priv->params.rx_am_enabled;
+ bool was_opened;
+ int err = 0;
int tc;
int i;
@@ -426,12 +618,19 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
return -ENOTSUPP;
mutex_lock(&priv->state_lock);
- priv->params.tx_cq_moderation_usec = coal->tx_coalesce_usecs;
- priv->params.tx_cq_moderation_pkts = coal->tx_max_coalesced_frames;
- priv->params.rx_cq_moderation_usec = coal->rx_coalesce_usecs;
- priv->params.rx_cq_moderation_pkts = coal->rx_max_coalesced_frames;
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (was_opened && restart) {
+ mlx5e_close_locked(netdev);
+ priv->params.rx_am_enabled = !!coal->use_adaptive_rx_coalesce;
+ }
+
+ priv->params.tx_cq_moderation.usec = coal->tx_coalesce_usecs;
+ priv->params.tx_cq_moderation.pkts = coal->tx_max_coalesced_frames;
+ priv->params.rx_cq_moderation.usec = coal->rx_coalesce_usecs;
+ priv->params.rx_cq_moderation.pkts = coal->rx_max_coalesced_frames;
+
+ if (!was_opened || restart)
goto out;
for (i = 0; i < priv->params.num_channels; ++i) {
@@ -450,35 +649,39 @@ static int mlx5e_set_coalesce(struct net_device *netdev,
}
out:
+ if (was_opened && restart)
+ err = mlx5e_open_locked(netdev);
+
mutex_unlock(&priv->state_lock);
- return 0;
+ return err;
}
-static u32 ptys2ethtool_supported_link(u32 eth_proto_cap)
+static void ptys2ethtool_supported_link(unsigned long *supported_modes,
+ u32 eth_proto_cap)
{
- int i;
- u32 supported_modes = 0;
+ unsigned long proto_cap = eth_proto_cap;
+ int proto;
- for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
- if (eth_proto_cap & MLX5E_PROT_MASK(i))
- supported_modes |= ptys2ethtool_table[i].supported;
- }
- return supported_modes;
+ for_each_set_bit(proto, &proto_cap, MLX5E_LINK_MODES_NUMBER)
+ bitmap_or(supported_modes, supported_modes,
+ ptys2ethtool_table[proto].supported,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static u32 ptys2ethtool_adver_link(u32 eth_proto_cap)
+static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
+ u32 eth_proto_cap)
{
- int i;
- u32 advertising_modes = 0;
+ unsigned long proto_cap = eth_proto_cap;
+ int proto;
- for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
- if (eth_proto_cap & MLX5E_PROT_MASK(i))
- advertising_modes |= ptys2ethtool_table[i].advertised;
- }
- return advertising_modes;
+ for_each_set_bit(proto, &proto_cap, MLX5E_LINK_MODES_NUMBER)
+ bitmap_or(advertising_modes, advertising_modes,
+ ptys2ethtool_table[proto].advertised,
+ __ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static u32 ptys2ethtool_supported_port(u32 eth_proto_cap)
+static void ptys2ethtool_supported_port(struct ethtool_link_ksettings *link_ksettings,
+ u32 eth_proto_cap)
{
if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
| MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
@@ -486,7 +689,7 @@ static u32 ptys2ethtool_supported_port(u32 eth_proto_cap)
| MLX5E_PROT_MASK(MLX5E_40GBASE_SR4)
| MLX5E_PROT_MASK(MLX5E_100GBASE_SR4)
| MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) {
- return SUPPORTED_FIBRE;
+ ethtool_link_ksettings_add_link_mode(link_ksettings, supported, FIBRE);
}
if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_100GBASE_KR4)
@@ -494,14 +697,32 @@ static u32 ptys2ethtool_supported_port(u32 eth_proto_cap)
| MLX5E_PROT_MASK(MLX5E_10GBASE_KR)
| MLX5E_PROT_MASK(MLX5E_10GBASE_KX4)
| MLX5E_PROT_MASK(MLX5E_1000BASE_KX))) {
- return SUPPORTED_Backplane;
+ ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Backplane);
}
+}
+
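+/* Return the highest speed among the port's PTYS-capable protocols. */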
+int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed)
+{
+ u32 max_speed = 0;
+ u32 proto_cap;
+ int err;
+ int i;
+
+ err = mlx5_query_port_proto_cap(mdev, &proto_cap, MLX5_PTYS_EN);
+ if (err)
+ return err;
+
+ for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i)
+ if (proto_cap & MLX5E_PROT_MASK(i))
+ max_speed = max(max_speed, ptys2ethtool_table[i].speed);
+
+ *speed = max_speed;
return 0;
}
static void get_speed_duplex(struct net_device *netdev,
u32 eth_proto_oper,
- struct ethtool_cmd *cmd)
+ struct ethtool_link_ksettings *link_ksettings)
{
int i;
u32 speed = SPEED_UNKNOWN;
@@ -518,23 +739,32 @@ static void get_speed_duplex(struct net_device *netdev,
}
}
out:
- ethtool_cmd_speed_set(cmd, speed);
- cmd->duplex = duplex;
+ link_ksettings->base.speed = speed;
+ link_ksettings->base.duplex = duplex;
}
-static void get_supported(u32 eth_proto_cap, u32 *supported)
+static void get_supported(u32 eth_proto_cap,
+ struct ethtool_link_ksettings *link_ksettings)
{
- *supported |= ptys2ethtool_supported_port(eth_proto_cap);
- *supported |= ptys2ethtool_supported_link(eth_proto_cap);
- *supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ unsigned long *supported = link_ksettings->link_modes.supported;
+
+ ptys2ethtool_supported_port(link_ksettings, eth_proto_cap);
+ ptys2ethtool_supported_link(supported, eth_proto_cap);
+ ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause);
+ ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Asym_Pause);
}
static void get_advertising(u32 eth_proto_cap, u8 tx_pause,
- u8 rx_pause, u32 *advertising)
+ u8 rx_pause,
+ struct ethtool_link_ksettings *link_ksettings)
{
- *advertising |= ptys2ethtool_adver_link(eth_proto_cap);
- *advertising |= tx_pause ? ADVERTISED_Pause : 0;
- *advertising |= (tx_pause ^ rx_pause) ? ADVERTISED_Asym_Pause : 0;
+ unsigned long *advertising = link_ksettings->link_modes.advertising;
+
+ ptys2ethtool_adver_link(advertising, eth_proto_cap);
+ if (tx_pause)
+ ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause);
+ if (tx_pause ^ rx_pause)
+ ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Asym_Pause);
}
static u8 get_connector_port(u32 eth_proto)
@@ -562,13 +792,16 @@ static u8 get_connector_port(u32 eth_proto)
return PORT_OTHER;
}
-static void get_lp_advertising(u32 eth_proto_lp, u32 *lp_advertising)
+static void get_lp_advertising(u32 eth_proto_lp,
+ struct ethtool_link_ksettings *link_ksettings)
{
- *lp_advertising = ptys2ethtool_adver_link(eth_proto_lp);
+ unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising;
+
+ ptys2ethtool_adver_link(lp_advertising, eth_proto_lp);
}
-static int mlx5e_get_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
+static int mlx5e_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
@@ -577,6 +810,8 @@ static int mlx5e_get_settings(struct net_device *netdev,
u32 eth_proto_admin;
u32 eth_proto_lp;
u32 eth_proto_oper;
+ u8 an_disable_admin;
+ u8 an_status;
int err;
err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1);
@@ -587,35 +822,49 @@ static int mlx5e_get_settings(struct net_device *netdev,
goto err_query_ptys;
}
- eth_proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability);
- eth_proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin);
- eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
- eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
+ eth_proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability);
+ eth_proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin);
+ eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
+ eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
+ an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
+ an_status = MLX5_GET(ptys_reg, out, an_status);
- cmd->supported = 0;
- cmd->advertising = 0;
+ ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
+ ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
- get_supported(eth_proto_cap, &cmd->supported);
- get_advertising(eth_proto_admin, 0, 0, &cmd->advertising);
- get_speed_duplex(netdev, eth_proto_oper, cmd);
+ get_supported(eth_proto_cap, link_ksettings);
+ get_advertising(eth_proto_admin, 0, 0, link_ksettings);
+ get_speed_duplex(netdev, eth_proto_oper, link_ksettings);
eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
- cmd->port = get_connector_port(eth_proto_oper);
- get_lp_advertising(eth_proto_lp, &cmd->lp_advertising);
+ link_ksettings->base.port = get_connector_port(eth_proto_oper);
+ get_lp_advertising(eth_proto_lp, link_ksettings);
- cmd->transceiver = XCVR_INTERNAL;
+ if (an_status == MLX5_AN_COMPLETE)
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ lp_advertising, Autoneg);
+
+ link_ksettings->base.autoneg = an_disable_admin ? AUTONEG_DISABLE :
+ AUTONEG_ENABLE;
+ ethtool_link_ksettings_add_link_mode(link_ksettings, supported,
+ Autoneg);
+ if (!an_disable_admin)
+ ethtool_link_ksettings_add_link_mode(link_ksettings,
+ advertising, Autoneg);
err_query_ptys:
return err;
}
-static u32 mlx5e_ethtool2ptys_adver_link(u32 link_modes)
+static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes)
{
u32 i, ptys_modes = 0;
for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
- if (ptys2ethtool_table[i].advertised & link_modes)
+ if (bitmap_intersects(ptys2ethtool_table[i].advertised,
+ link_modes,
+ __ETHTOOL_LINK_MODE_MASK_NBITS))
ptys_modes |= MLX5E_PROT_MASK(i);
}
@@ -634,21 +883,25 @@ static u32 mlx5e_ethtool2ptys_speed_link(u32 speed)
return speed_links;
}
-static int mlx5e_set_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
+static int mlx5e_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *link_ksettings)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
+ u32 eth_proto_cap, eth_proto_admin;
+ bool an_changes = false;
+ u8 an_disable_admin;
+ u8 an_disable_cap;
+ bool an_disable;
u32 link_modes;
+ u8 an_status;
u32 speed;
- u32 eth_proto_cap, eth_proto_admin;
- enum mlx5_port_status ps;
int err;
- speed = ethtool_cmd_speed(cmd);
+ speed = link_ksettings->base.speed;
- link_modes = cmd->autoneg == AUTONEG_ENABLE ?
- mlx5e_ethtool2ptys_adver_link(cmd->advertising) :
+ link_modes = link_ksettings->base.autoneg == AUTONEG_ENABLE ?
+ mlx5e_ethtool2ptys_adver_link(link_ksettings->link_modes.advertising) :
mlx5e_ethtool2ptys_speed_link(speed);
err = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
@@ -673,15 +926,18 @@ static int mlx5e_set_settings(struct net_device *netdev,
goto out;
}
- if (link_modes == eth_proto_admin)
+ mlx5_query_port_autoneg(mdev, MLX5_PTYS_EN, &an_status,
+ &an_disable_cap, &an_disable_admin);
+
+ an_disable = link_ksettings->base.autoneg == AUTONEG_DISABLE;
+ an_changes = ((!an_disable && an_disable_admin) ||
+ (an_disable && !an_disable_admin));
+
+ if (!an_changes && link_modes == eth_proto_admin)
goto out;
- mlx5_query_port_admin_status(mdev, &ps);
- if (ps == MLX5_PORT_UP)
- mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
- mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN);
- if (ps == MLX5_PORT_UP)
- mlx5_set_port_admin_status(mdev, MLX5_PORT_UP);
+ mlx5_set_port_ptys(mdev, an_disable, link_modes, MLX5_PTYS_EN);
+ mlx5_toggle_port_link(mdev);
out:
return err;
@@ -727,9 +983,8 @@ static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
MLX5_SET(modify_tir_in, in, bitmask.hash, 1);
mlx5e_build_tir_ctx_hash(tirc, priv);
- for (i = 0; i < MLX5E_NUM_TT; i++)
- if (IS_HASHING_TT(i))
- mlx5_core_modify_tir(mdev, priv->tirn[i], in, inlen);
+ for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
+ mlx5_core_modify_tir(mdev, priv->indir_tir[i].tirn, in, inlen);
}
static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
@@ -751,9 +1006,11 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
mutex_lock(&priv->state_lock);
if (indir) {
+ u32 rqtn = priv->indir_rqt.rqtn;
+
memcpy(priv->params.indirection_rqt, indir,
sizeof(priv->params.indirection_rqt));
- mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
+ mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
}
if (key)
@@ -782,6 +1039,15 @@ static int mlx5e_get_rxnfc(struct net_device *netdev,
case ETHTOOL_GRXRINGS:
info->data = priv->params.num_channels;
break;
+ case ETHTOOL_GRXCLSRLCNT:
+ info->rule_cnt = priv->fs.ethtool.tot_num_rules;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ err = mlx5e_ethtool_get_flow(priv, info, info->fs.location);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ err = mlx5e_ethtool_get_all_flows(priv, info, rule_locs);
+ break;
default:
err = -EOPNOTSUPP;
break;
@@ -1036,6 +1302,209 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return mlx5_set_port_wol(mdev, mlx5_wol_mode);
}
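+/* ethtool identify (-p): drive the port beacon LED, indefinitely while
+ * ACTIVE and off again on INACTIVE.
+ */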
+static int mlx5e_set_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u16 beacon_duration;
+
+ if (!MLX5_CAP_GEN(mdev, beacon_led))
+ return -EOPNOTSUPP;
+
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ beacon_duration = MLX5_BEACON_DURATION_INF;
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ beacon_duration = MLX5_BEACON_DURATION_OFF;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return mlx5_set_port_beacon(mdev, beacon_duration);
+}
+
+static int mlx5e_get_module_info(struct net_device *netdev,
+ struct ethtool_modinfo *modinfo)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *dev = priv->mdev;
+ int size_read = 0;
+ u8 data[4];
+
+ size_read = mlx5_query_module_eeprom(dev, 0, 2, data);
+ if (size_read < 2)
+ return -EIO;
+
+ /* data[0] = identifier byte */
+ switch (data[0]) {
+ case MLX5_MODULE_ID_QSFP:
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ break;
+ case MLX5_MODULE_ID_QSFP_PLUS:
+ case MLX5_MODULE_ID_QSFP28:
+ /* data[1] = revision id */
+ if (data[0] == MLX5_MODULE_ID_QSFP28 || data[1] >= 0x3) {
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
+ }
+ break;
+ case MLX5_MODULE_ID_SFP:
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ break;
+ default:
+ netdev_err(priv->netdev, "%s: cable type not recognized:0x%x\n",
+ __func__, data[0]);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
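+/* mlx5_query_module_eeprom() may return fewer bytes than requested, so
+ * loop until the whole range has been read.
+ */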
+static int mlx5e_get_module_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *ee,
+ u8 *data)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int offset = ee->offset;
+ int size_read;
+ int i = 0;
+
+ if (!ee->len)
+ return -EINVAL;
+
+ memset(data, 0, ee->len);
+
+ while (i < ee->len) {
+ size_read = mlx5_query_module_eeprom(mdev, offset, ee->len - i,
+ data + i);
+
+ if (!size_read)
+ /* Done reading */
+ return 0;
+
+ if (size_read < 0) {
+ netdev_err(priv->netdev, "%s: mlx5_query_eeprom failed:0x%x\n",
+ __func__, size_read);
+ return 0;
+ }
+
+ i += size_read;
+ offset += size_read;
+ }
+
+ return 0;
+}
+
+typedef int (*mlx5e_pflag_handler)(struct net_device *netdev, bool enable);
+
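+/* Switch RX CQ moderation between EQE-based and CQE-based period modes;
+ * a mode change requires reopening the channels.
+ */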
+static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ bool rx_mode_changed;
+ u8 rx_cq_period_mode;
+ int err = 0;
+ bool reset;
+
+ rx_cq_period_mode = enable ?
+ MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
+ MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+ rx_mode_changed = rx_cq_period_mode != priv->params.rx_cq_period_mode;
+
+ if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE &&
+ !MLX5_CAP_GEN(mdev, cq_period_start_from_cqe))
+ return -ENOTSUPP;
+
+ if (!rx_mode_changed)
+ return 0;
+
+ reset = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (reset)
+ mlx5e_close_locked(netdev);
+
+ mlx5e_set_rx_cq_mode_params(&priv->params, rx_cq_period_mode);
+
+ if (reset)
+ err = mlx5e_open_locked(netdev);
+
+ return err;
+}
+
+static int mlx5e_handle_pflag(struct net_device *netdev,
+ u32 wanted_flags,
+ enum mlx5e_priv_flag flag,
+ mlx5e_pflag_handler pflag_handler)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ bool enable = !!(wanted_flags & flag);
+ u32 changes = wanted_flags ^ priv->pflags;
+ int err;
+
+ if (!(changes & flag))
+ return 0;
+
+ err = pflag_handler(netdev, enable);
+ if (err) {
+ netdev_err(netdev, "%s private flag 0x%x failed err %d\n",
+ enable ? "Enable" : "Disable", flag, err);
+ return err;
+ }
+
+ MLX5E_SET_PRIV_FLAG(priv, flag, enable);
+ return 0;
+}
+
+static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err;
+
+ mutex_lock(&priv->state_lock);
+
+ err = mlx5e_handle_pflag(netdev, pflags,
+ MLX5E_PFLAG_RX_CQE_BASED_MODER,
+ set_pflag_rx_cqe_based_moder);
+
+ mutex_unlock(&priv->state_lock);
+ return err ? -EINVAL : 0;
+}
+
+static u32 mlx5e_get_priv_flags(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ return priv->pflags;
+}
+
+static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ int err = 0;
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ err = mlx5e_ethtool_flow_replace(priv, &cmd->fs);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
const struct ethtool_ops mlx5e_ethtool_ops = {
.get_drvinfo = mlx5e_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -1048,18 +1517,24 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.set_channels = mlx5e_set_channels,
.get_coalesce = mlx5e_get_coalesce,
.set_coalesce = mlx5e_set_coalesce,
- .get_settings = mlx5e_get_settings,
- .set_settings = mlx5e_set_settings,
+ .get_link_ksettings = mlx5e_get_link_ksettings,
+ .set_link_ksettings = mlx5e_set_link_ksettings,
.get_rxfh_key_size = mlx5e_get_rxfh_key_size,
.get_rxfh_indir_size = mlx5e_get_rxfh_indir_size,
.get_rxfh = mlx5e_get_rxfh,
.set_rxfh = mlx5e_set_rxfh,
.get_rxnfc = mlx5e_get_rxnfc,
+ .set_rxnfc = mlx5e_set_rxnfc,
.get_tunable = mlx5e_get_tunable,
.set_tunable = mlx5e_set_tunable,
.get_pauseparam = mlx5e_get_pauseparam,
.set_pauseparam = mlx5e_set_pauseparam,
.get_ts_info = mlx5e_get_ts_info,
+ .set_phys_id = mlx5e_set_phys_id,
.get_wol = mlx5e_get_wol,
.set_wol = mlx5e_set_wol,
+ .get_module_info = mlx5e_get_module_info,
+ .get_module_eeprom = mlx5e_get_module_eeprom,
+ .get_priv_flags = mlx5e_get_priv_flags,
+ .set_priv_flags = mlx5e_set_priv_flags
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index d00a24203410..1587a9fd5724 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -37,7 +37,10 @@
#include <linux/mlx5/fs.h>
#include "en.h"
-#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
+static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
+ struct mlx5e_l2_rule *ai, int type);
+static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
+ struct mlx5e_l2_rule *ai);
enum {
MLX5E_FULLMATCH = 0,
@@ -58,21 +61,21 @@ enum {
MLX5E_ACTION_DEL = 2,
};
-struct mlx5e_eth_addr_hash_node {
+struct mlx5e_l2_hash_node {
struct hlist_node hlist;
u8 action;
- struct mlx5e_eth_addr_info ai;
+ struct mlx5e_l2_rule ai;
};
-static inline int mlx5e_hash_eth_addr(u8 *addr)
+static inline int mlx5e_hash_l2(u8 *addr)
{
return addr[5];
}
-static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr)
+static void mlx5e_add_l2_to_hash(struct hlist_head *hash, u8 *addr)
{
- struct mlx5e_eth_addr_hash_node *hn;
- int ix = mlx5e_hash_eth_addr(addr);
+ struct mlx5e_l2_hash_node *hn;
+ int ix = mlx5e_hash_l2(addr);
int found = 0;
hlist_for_each_entry(hn, &hash[ix], hlist)
@@ -96,371 +99,12 @@ static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr)
hlist_add_head(&hn->hlist, &hash[ix]);
}
-static void mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
+static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn)
{
hlist_del(&hn->hlist);
kfree(hn);
}
-static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
- struct mlx5e_eth_addr_info *ai)
-{
- if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP))
- mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP]);
-
- if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP))
- mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP]);
-
- if (ai->tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH))
- mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH]);
-
- if (ai->tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH))
- mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH]);
-
- if (ai->tt_vec & BIT(MLX5E_TT_IPV6_TCP))
- mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_TCP]);
-
- if (ai->tt_vec & BIT(MLX5E_TT_IPV4_TCP))
- mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_TCP]);
-
- if (ai->tt_vec & BIT(MLX5E_TT_IPV6_UDP))
- mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6_UDP]);
-
- if (ai->tt_vec & BIT(MLX5E_TT_IPV4_UDP))
- mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4_UDP]);
-
- if (ai->tt_vec & BIT(MLX5E_TT_IPV6))
- mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV6]);
-
- if (ai->tt_vec & BIT(MLX5E_TT_IPV4))
- mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_IPV4]);
-
- if (ai->tt_vec & BIT(MLX5E_TT_ANY))
- mlx5_del_flow_rule(ai->ft_rule[MLX5E_TT_ANY]);
-}
-
-static int mlx5e_get_eth_addr_type(u8 *addr)
-{
- if (is_unicast_ether_addr(addr))
- return MLX5E_UC;
-
- if ((addr[0] == 0x01) &&
- (addr[1] == 0x00) &&
- (addr[2] == 0x5e) &&
- !(addr[3] & 0x80))
- return MLX5E_MC_IPV4;
-
- if ((addr[0] == 0x33) &&
- (addr[1] == 0x33))
- return MLX5E_MC_IPV6;
-
- return MLX5E_MC_OTHER;
-}
-
-static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
-{
- int eth_addr_type;
- u32 ret;
-
- switch (type) {
- case MLX5E_FULLMATCH:
- eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
- switch (eth_addr_type) {
- case MLX5E_UC:
- ret =
- BIT(MLX5E_TT_IPV4_TCP) |
- BIT(MLX5E_TT_IPV6_TCP) |
- BIT(MLX5E_TT_IPV4_UDP) |
- BIT(MLX5E_TT_IPV6_UDP) |
- BIT(MLX5E_TT_IPV4_IPSEC_AH) |
- BIT(MLX5E_TT_IPV6_IPSEC_AH) |
- BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
- BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
- BIT(MLX5E_TT_IPV4) |
- BIT(MLX5E_TT_IPV6) |
- BIT(MLX5E_TT_ANY) |
- 0;
- break;
-
- case MLX5E_MC_IPV4:
- ret =
- BIT(MLX5E_TT_IPV4_UDP) |
- BIT(MLX5E_TT_IPV4) |
- 0;
- break;
-
- case MLX5E_MC_IPV6:
- ret =
- BIT(MLX5E_TT_IPV6_UDP) |
- BIT(MLX5E_TT_IPV6) |
- 0;
- break;
-
- case MLX5E_MC_OTHER:
- ret =
- BIT(MLX5E_TT_ANY) |
- 0;
- break;
- }
-
- break;
-
- case MLX5E_ALLMULTI:
- ret =
- BIT(MLX5E_TT_IPV4_UDP) |
- BIT(MLX5E_TT_IPV6_UDP) |
- BIT(MLX5E_TT_IPV4) |
- BIT(MLX5E_TT_IPV6) |
- BIT(MLX5E_TT_ANY) |
- 0;
- break;
-
- default: /* MLX5E_PROMISC */
- ret =
- BIT(MLX5E_TT_IPV4_TCP) |
- BIT(MLX5E_TT_IPV6_TCP) |
- BIT(MLX5E_TT_IPV4_UDP) |
- BIT(MLX5E_TT_IPV6_UDP) |
- BIT(MLX5E_TT_IPV4_IPSEC_AH) |
- BIT(MLX5E_TT_IPV6_IPSEC_AH) |
- BIT(MLX5E_TT_IPV4_IPSEC_ESP) |
- BIT(MLX5E_TT_IPV6_IPSEC_ESP) |
- BIT(MLX5E_TT_IPV4) |
- BIT(MLX5E_TT_IPV6) |
- BIT(MLX5E_TT_ANY) |
- 0;
- break;
- }
-
- return ret;
-}
-
-static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
- struct mlx5e_eth_addr_info *ai,
- int type, u32 *mc, u32 *mv)
-{
- struct mlx5_flow_destination dest;
- u8 match_criteria_enable = 0;
- struct mlx5_flow_rule **rule_p;
- struct mlx5_flow_table *ft = priv->fts.main.t;
- u8 *mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
- outer_headers.dmac_47_16);
- u8 *mv_dmac = MLX5_ADDR_OF(fte_match_param, mv,
- outer_headers.dmac_47_16);
- u32 *tirn = priv->tirn;
- u32 tt_vec;
- int err = 0;
-
- dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
-
- switch (type) {
- case MLX5E_FULLMATCH:
- match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- eth_broadcast_addr(mc_dmac);
- ether_addr_copy(mv_dmac, ai->addr);
- break;
-
- case MLX5E_ALLMULTI:
- match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- mc_dmac[0] = 0x01;
- mv_dmac[0] = 0x01;
- break;
-
- case MLX5E_PROMISC:
- break;
- }
-
- tt_vec = mlx5e_get_tt_vec(ai, type);
-
- if (tt_vec & BIT(MLX5E_TT_ANY)) {
- rule_p = &ai->ft_rule[MLX5E_TT_ANY];
- dest.tir_num = tirn[MLX5E_TT_ANY];
- *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG, &dest);
- if (IS_ERR_OR_NULL(*rule_p))
- goto err_del_ai;
- ai->tt_vec |= BIT(MLX5E_TT_ANY);
- }
-
- match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
-
- if (tt_vec & BIT(MLX5E_TT_IPV4)) {
- rule_p = &ai->ft_rule[MLX5E_TT_IPV4];
- dest.tir_num = tirn[MLX5E_TT_IPV4];
- MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
- ETH_P_IP);
- *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG, &dest);
- if (IS_ERR_OR_NULL(*rule_p))
- goto err_del_ai;
- ai->tt_vec |= BIT(MLX5E_TT_IPV4);
- }
-
- if (tt_vec & BIT(MLX5E_TT_IPV6)) {
- rule_p = &ai->ft_rule[MLX5E_TT_IPV6];
- dest.tir_num = tirn[MLX5E_TT_IPV6];
- MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
- ETH_P_IPV6);
- *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG, &dest);
- if (IS_ERR_OR_NULL(*rule_p))
- goto err_del_ai;
- ai->tt_vec |= BIT(MLX5E_TT_IPV6);
- }
-
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
- MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_UDP);
-
- if (tt_vec & BIT(MLX5E_TT_IPV4_UDP)) {
- rule_p = &ai->ft_rule[MLX5E_TT_IPV4_UDP];
- dest.tir_num = tirn[MLX5E_TT_IPV4_UDP];
- MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
- ETH_P_IP);
- *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG, &dest);
- if (IS_ERR_OR_NULL(*rule_p))
- goto err_del_ai;
- ai->tt_vec |= BIT(MLX5E_TT_IPV4_UDP);
- }
-
- if (tt_vec & BIT(MLX5E_TT_IPV6_UDP)) {
- rule_p = &ai->ft_rule[MLX5E_TT_IPV6_UDP];
- dest.tir_num = tirn[MLX5E_TT_IPV6_UDP];
- MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
- ETH_P_IPV6);
- *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG, &dest);
- if (IS_ERR_OR_NULL(*rule_p))
- goto err_del_ai;
- ai->tt_vec |= BIT(MLX5E_TT_IPV6_UDP);
- }
-
- MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_TCP);
-
- if (tt_vec & BIT(MLX5E_TT_IPV4_TCP)) {
- rule_p = &ai->ft_rule[MLX5E_TT_IPV4_TCP];
- dest.tir_num = tirn[MLX5E_TT_IPV4_TCP];
- MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
- ETH_P_IP);
- *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG, &dest);
- if (IS_ERR_OR_NULL(*rule_p))
- goto err_del_ai;
- ai->tt_vec |= BIT(MLX5E_TT_IPV4_TCP);
- }
-
- if (tt_vec & BIT(MLX5E_TT_IPV6_TCP)) {
- rule_p = &ai->ft_rule[MLX5E_TT_IPV6_TCP];
- dest.tir_num = tirn[MLX5E_TT_IPV6_TCP];
- MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
- ETH_P_IPV6);
- *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG, &dest);
- if (IS_ERR_OR_NULL(*rule_p))
- goto err_del_ai;
-
- ai->tt_vec |= BIT(MLX5E_TT_IPV6_TCP);
- }
-
- MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_AH);
-
- if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_AH)) {
- rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_AH];
- dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_AH];
- MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
- ETH_P_IP);
- *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG, &dest);
- if (IS_ERR_OR_NULL(*rule_p))
- goto err_del_ai;
- ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_AH);
- }
-
- if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_AH)) {
- rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_AH];
- dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_AH];
- MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
- ETH_P_IPV6);
- *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG, &dest);
- if (IS_ERR_OR_NULL(*rule_p))
- goto err_del_ai;
- ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_AH);
- }
-
- MLX5_SET(fte_match_param, mv, outer_headers.ip_protocol, IPPROTO_ESP);
-
- if (tt_vec & BIT(MLX5E_TT_IPV4_IPSEC_ESP)) {
- rule_p = &ai->ft_rule[MLX5E_TT_IPV4_IPSEC_ESP];
- dest.tir_num = tirn[MLX5E_TT_IPV4_IPSEC_ESP];
- MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
- ETH_P_IP);
- *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG, &dest);
- if (IS_ERR_OR_NULL(*rule_p))
- goto err_del_ai;
- ai->tt_vec |= BIT(MLX5E_TT_IPV4_IPSEC_ESP);
- }
-
- if (tt_vec & BIT(MLX5E_TT_IPV6_IPSEC_ESP)) {
- rule_p = &ai->ft_rule[MLX5E_TT_IPV6_IPSEC_ESP];
- dest.tir_num = tirn[MLX5E_TT_IPV6_IPSEC_ESP];
- MLX5_SET(fte_match_param, mv, outer_headers.ethertype,
- ETH_P_IPV6);
- *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
- MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
- MLX5_FS_DEFAULT_FLOW_TAG, &dest);
- if (IS_ERR_OR_NULL(*rule_p))
- goto err_del_ai;
- ai->tt_vec |= BIT(MLX5E_TT_IPV6_IPSEC_ESP);
- }
-
- return 0;
-
-err_del_ai:
- err = PTR_ERR(*rule_p);
- *rule_p = NULL;
- mlx5e_del_eth_addr_from_flow_table(priv, ai);
-
- return err;
-}
-
-static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
- struct mlx5e_eth_addr_info *ai, int type)
-{
- u32 *match_criteria;
- u32 *match_value;
- int err = 0;
-
- match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
- match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
- if (!match_value || !match_criteria) {
- netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
- err = -ENOMEM;
- goto add_eth_addr_rule_out;
- }
-
- err = __mlx5e_add_eth_addr_rule(priv, ai, type, match_criteria,
- match_value);
-
-add_eth_addr_rule_out:
- kvfree(match_criteria);
- kvfree(match_value);
-
- return err;
-}
-
static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
{
struct net_device *ndev = priv->netdev;
@@ -472,7 +116,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
int i;
list_size = 0;
- for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID)
+ for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID)
list_size++;
max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);
@@ -489,7 +133,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
return -ENOMEM;
i = 0;
- for_each_set_bit(vlan, priv->vlan.active_vlans, VLAN_N_VID) {
+ for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID) {
if (i >= list_size)
break;
vlans[i++] = vlan;
@@ -512,37 +156,38 @@ enum mlx5e_vlan_rule_type {
static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
enum mlx5e_vlan_rule_type rule_type,
- u16 vid, u32 *mc, u32 *mv)
+ u16 vid, struct mlx5_flow_spec *spec)
{
- struct mlx5_flow_table *ft = priv->fts.vlan.t;
+ struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
struct mlx5_flow_destination dest;
- u8 match_criteria_enable = 0;
struct mlx5_flow_rule **rule_p;
int err = 0;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
- dest.ft = priv->fts.main.t;
+ dest.ft = priv->fs.l2.ft.t;
- match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
switch (rule_type) {
case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
- rule_p = &priv->vlan.untagged_rule;
+ rule_p = &priv->fs.vlan.untagged_rule;
break;
case MLX5E_VLAN_RULE_TYPE_ANY_VID:
- rule_p = &priv->vlan.any_vlan_rule;
- MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1);
+ rule_p = &priv->fs.vlan.any_vlan_rule;
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1);
break;
default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
- rule_p = &priv->vlan.active_vlans_rule[vid];
- MLX5_SET(fte_match_param, mv, outer_headers.vlan_tag, 1);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
- MLX5_SET(fte_match_param, mv, outer_headers.first_vid, vid);
+ rule_p = &priv->fs.vlan.active_vlans_rule[vid];
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.first_vid);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
+ vid);
break;
}
- *rule_p = mlx5_add_flow_rule(ft, match_criteria_enable, mc, mv,
+ *rule_p = mlx5_add_flow_rule(ft, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
MLX5_FS_DEFAULT_FLOW_TAG,
&dest);
@@ -559,27 +204,21 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
- u32 *match_criteria;
- u32 *match_value;
+ struct mlx5_flow_spec *spec;
int err = 0;
- match_value = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
- match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
- if (!match_value || !match_criteria) {
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec) {
netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
- err = -ENOMEM;
- goto add_vlan_rule_out;
+ return -ENOMEM;
}
if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID)
mlx5e_vport_context_update_vlans(priv);
- err = __mlx5e_add_vlan_rule(priv, rule_type, vid, match_criteria,
- match_value);
+ err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec);
-add_vlan_rule_out:
- kvfree(match_criteria);
- kvfree(match_value);
+ kvfree(spec);
return err;
}
@@ -589,22 +228,22 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
{
switch (rule_type) {
case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
- if (priv->vlan.untagged_rule) {
- mlx5_del_flow_rule(priv->vlan.untagged_rule);
- priv->vlan.untagged_rule = NULL;
+ if (priv->fs.vlan.untagged_rule) {
+ mlx5_del_flow_rule(priv->fs.vlan.untagged_rule);
+ priv->fs.vlan.untagged_rule = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_ANY_VID:
- if (priv->vlan.any_vlan_rule) {
- mlx5_del_flow_rule(priv->vlan.any_vlan_rule);
- priv->vlan.any_vlan_rule = NULL;
+ if (priv->fs.vlan.any_vlan_rule) {
+ mlx5_del_flow_rule(priv->fs.vlan.any_vlan_rule);
+ priv->fs.vlan.any_vlan_rule = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
mlx5e_vport_context_update_vlans(priv);
- if (priv->vlan.active_vlans_rule[vid]) {
- mlx5_del_flow_rule(priv->vlan.active_vlans_rule[vid]);
- priv->vlan.active_vlans_rule[vid] = NULL;
+ if (priv->fs.vlan.active_vlans_rule[vid]) {
+ mlx5_del_flow_rule(priv->fs.vlan.active_vlans_rule[vid]);
+ priv->fs.vlan.active_vlans_rule[vid] = NULL;
}
mlx5e_vport_context_update_vlans(priv);
break;
@@ -613,10 +252,10 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
{
- if (!priv->vlan.filter_disabled)
+ if (!priv->fs.vlan.filter_disabled)
return;
- priv->vlan.filter_disabled = false;
+ priv->fs.vlan.filter_disabled = false;
if (priv->netdev->flags & IFF_PROMISC)
return;
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
@@ -624,10 +263,10 @@ void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
{
- if (priv->vlan.filter_disabled)
+ if (priv->fs.vlan.filter_disabled)
return;
- priv->vlan.filter_disabled = true;
+ priv->fs.vlan.filter_disabled = true;
if (priv->netdev->flags & IFF_PROMISC)
return;
mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
@@ -638,7 +277,7 @@ int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
{
struct mlx5e_priv *priv = netdev_priv(dev);
- set_bit(vid, priv->vlan.active_vlans);
+ set_bit(vid, priv->fs.vlan.active_vlans);
return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
}
@@ -648,7 +287,7 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
{
struct mlx5e_priv *priv = netdev_priv(dev);
- clear_bit(vid, priv->vlan.active_vlans);
+ clear_bit(vid, priv->fs.vlan.active_vlans);
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
@@ -656,21 +295,21 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
}
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
- for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
+ for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
-static void mlx5e_execute_action(struct mlx5e_priv *priv,
- struct mlx5e_eth_addr_hash_node *hn)
+static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
+ struct mlx5e_l2_hash_node *hn)
{
switch (hn->action) {
case MLX5E_ACTION_ADD:
- mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
+ mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
hn->action = MLX5E_ACTION_NONE;
break;
case MLX5E_ACTION_DEL:
- mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
- mlx5e_del_eth_addr_from_hash(hn);
+ mlx5e_del_l2_flow_rule(priv, &hn->ai);
+ mlx5e_del_l2_from_hash(hn);
break;
}
}
@@ -682,14 +321,14 @@ static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
netif_addr_lock_bh(netdev);
- mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc,
- priv->netdev->dev_addr);
+ mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc,
+ priv->netdev->dev_addr);
netdev_for_each_uc_addr(ha, netdev)
- mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, ha->addr);
+ mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr);
netdev_for_each_mc_addr(ha, netdev)
- mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_mc, ha->addr);
+ mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr);
netif_addr_unlock_bh(netdev);
}
@@ -699,17 +338,17 @@ static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
{
bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
struct net_device *ndev = priv->netdev;
- struct mlx5e_eth_addr_hash_node *hn;
+ struct mlx5e_l2_hash_node *hn;
struct hlist_head *addr_list;
struct hlist_node *tmp;
int i = 0;
int hi;
- addr_list = is_uc ? priv->eth_addr.netdev_uc : priv->eth_addr.netdev_mc;
+ addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
if (is_uc) /* Make sure our own address is pushed first */
ether_addr_copy(addr_array[i++], ndev->dev_addr);
- else if (priv->eth_addr.broadcast_enabled)
+ else if (priv->fs.l2.broadcast_enabled)
ether_addr_copy(addr_array[i++], ndev->broadcast);
mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
@@ -725,7 +364,7 @@ static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
int list_type)
{
bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
- struct mlx5e_eth_addr_hash_node *hn;
+ struct mlx5e_l2_hash_node *hn;
u8 (*addr_array)[ETH_ALEN] = NULL;
struct hlist_head *addr_list;
struct hlist_node *tmp;
@@ -734,12 +373,12 @@ static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
int err;
int hi;
- size = is_uc ? 0 : (priv->eth_addr.broadcast_enabled ? 1 : 0);
+ size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0);
max_size = is_uc ?
1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);
- addr_list = is_uc ? priv->eth_addr.netdev_uc : priv->eth_addr.netdev_mc;
+ addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
size++;
@@ -770,7 +409,7 @@ out:
static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
{
- struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
+ struct mlx5e_l2_table *ea = &priv->fs.l2;
mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
@@ -781,26 +420,26 @@ static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
{
- struct mlx5e_eth_addr_hash_node *hn;
+ struct mlx5e_l2_hash_node *hn;
struct hlist_node *tmp;
int i;
- mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
- mlx5e_execute_action(priv, hn);
+ mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
+ mlx5e_execute_l2_action(priv, hn);
- mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
- mlx5e_execute_action(priv, hn);
+ mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
+ mlx5e_execute_l2_action(priv, hn);
}
static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
{
- struct mlx5e_eth_addr_hash_node *hn;
+ struct mlx5e_l2_hash_node *hn;
struct hlist_node *tmp;
int i;
- mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
+ mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
hn->action = MLX5E_ACTION_DEL;
- mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
+ mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
hn->action = MLX5E_ACTION_DEL;
if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
@@ -814,7 +453,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
set_rx_mode_work);
- struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
+ struct mlx5e_l2_table *ea = &priv->fs.l2;
struct net_device *ndev = priv->netdev;
bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
@@ -830,27 +469,27 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;
if (enable_promisc) {
- mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
- if (!priv->vlan.filter_disabled)
+ mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC);
+ if (!priv->fs.vlan.filter_disabled)
mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
0);
}
if (enable_allmulti)
- mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
+ mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
if (enable_broadcast)
- mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
+ mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
mlx5e_handle_netdev_addr(priv);
if (disable_broadcast)
- mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
+ mlx5e_del_l2_flow_rule(priv, &ea->broadcast);
if (disable_allmulti)
- mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
+ mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
if (disable_promisc) {
- if (!priv->vlan.filter_disabled)
+ if (!priv->fs.vlan.filter_disabled)
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
0);
- mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
+ mlx5e_del_l2_flow_rule(priv, &ea->promisc);
}
ea->promisc_enabled = promisc_enabled;
@@ -872,224 +511,441 @@ static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
ft->num_groups = 0;
}
-void mlx5e_init_eth_addr(struct mlx5e_priv *priv)
+void mlx5e_init_l2_addr(struct mlx5e_priv *priv)
{
- ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast);
+ ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast);
}
-#define MLX5E_MAIN_GROUP0_SIZE BIT(3)
-#define MLX5E_MAIN_GROUP1_SIZE BIT(1)
-#define MLX5E_MAIN_GROUP2_SIZE BIT(0)
-#define MLX5E_MAIN_GROUP3_SIZE BIT(14)
-#define MLX5E_MAIN_GROUP4_SIZE BIT(13)
-#define MLX5E_MAIN_GROUP5_SIZE BIT(11)
-#define MLX5E_MAIN_GROUP6_SIZE BIT(2)
-#define MLX5E_MAIN_GROUP7_SIZE BIT(1)
-#define MLX5E_MAIN_GROUP8_SIZE BIT(0)
-#define MLX5E_MAIN_TABLE_SIZE (MLX5E_MAIN_GROUP0_SIZE +\
- MLX5E_MAIN_GROUP1_SIZE +\
- MLX5E_MAIN_GROUP2_SIZE +\
- MLX5E_MAIN_GROUP3_SIZE +\
- MLX5E_MAIN_GROUP4_SIZE +\
- MLX5E_MAIN_GROUP5_SIZE +\
- MLX5E_MAIN_GROUP6_SIZE +\
- MLX5E_MAIN_GROUP7_SIZE +\
- MLX5E_MAIN_GROUP8_SIZE)
-
-static int __mlx5e_create_main_groups(struct mlx5e_flow_table *ft, u32 *in,
- int inlen)
+void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
- u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
- u8 *dmac = MLX5_ADDR_OF(create_flow_group_in, in,
- match_criteria.outer_headers.dmac_47_16);
+ mlx5e_destroy_groups(ft);
+ kfree(ft->g);
+ mlx5_destroy_flow_table(ft->t);
+ ft->t = NULL;
+}
+
+static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc)
+{
+ int i;
+
+ for (i = 0; i < MLX5E_NUM_TT; i++) {
+ if (!IS_ERR_OR_NULL(ttc->rules[i])) {
+ mlx5_del_flow_rule(ttc->rules[i]);
+ ttc->rules[i] = NULL;
+ }
+ }
+}
+
+static struct {
+ u16 etype;
+ u8 proto;
+} ttc_rules[] = {
+ [MLX5E_TT_IPV4_TCP] = {
+ .etype = ETH_P_IP,
+ .proto = IPPROTO_TCP,
+ },
+ [MLX5E_TT_IPV6_TCP] = {
+ .etype = ETH_P_IPV6,
+ .proto = IPPROTO_TCP,
+ },
+ [MLX5E_TT_IPV4_UDP] = {
+ .etype = ETH_P_IP,
+ .proto = IPPROTO_UDP,
+ },
+ [MLX5E_TT_IPV6_UDP] = {
+ .etype = ETH_P_IPV6,
+ .proto = IPPROTO_UDP,
+ },
+ [MLX5E_TT_IPV4_IPSEC_AH] = {
+ .etype = ETH_P_IP,
+ .proto = IPPROTO_AH,
+ },
+ [MLX5E_TT_IPV6_IPSEC_AH] = {
+ .etype = ETH_P_IPV6,
+ .proto = IPPROTO_AH,
+ },
+ [MLX5E_TT_IPV4_IPSEC_ESP] = {
+ .etype = ETH_P_IP,
+ .proto = IPPROTO_ESP,
+ },
+ [MLX5E_TT_IPV6_IPSEC_ESP] = {
+ .etype = ETH_P_IPV6,
+ .proto = IPPROTO_ESP,
+ },
+ [MLX5E_TT_IPV4] = {
+ .etype = ETH_P_IP,
+ .proto = 0,
+ },
+ [MLX5E_TT_IPV6] = {
+ .etype = ETH_P_IPV6,
+ .proto = 0,
+ },
+ [MLX5E_TT_ANY] = {
+ .etype = 0,
+ .proto = 0,
+ },
+};
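ttc_rules[] is the traffic-type classifier in data form: each MLX5E_TT_* slot pins an (ethertype, ip_protocol) pair, with 0 acting as a wildcard. What the generated hardware rules compute is, in software terms, roughly this (illustrative only; it relies on the L4 entries preceding the L3 and ANY entries in the MLX5E_TT_* enum, as the array layout above suggests — in hardware the same ordering is enforced by the flow groups created next):

    static int classify_tt(u16 etype, u8 proto)
    {
        int tt;

        for (tt = 0; tt < MLX5E_TT_ANY; tt++)
            if (ttc_rules[tt].etype == etype &&
                (!ttc_rules[tt].proto || ttc_rules[tt].proto == proto))
                return tt;
        return MLX5E_TT_ANY; /* non-IP traffic */
    }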
+
+static struct mlx5_flow_rule *mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_destination *dest,
+ u16 etype,
+ u8 proto)
+{
+ struct mlx5_flow_rule *rule;
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec) {
+ netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (proto) {
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto);
+ }
+ if (etype) {
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
+ }
+
+ rule = mlx5_add_flow_rule(ft, spec,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ MLX5_FS_DEFAULT_FLOW_TAG,
+ dest);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
+ }
+
+ kvfree(spec);
+ return err ? ERR_PTR(err) : rule;
+}
+
+static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
+{
+ struct mlx5_flow_destination dest;
+ struct mlx5e_ttc_table *ttc;
+ struct mlx5_flow_rule **rules;
+ struct mlx5_flow_table *ft;
+ int tt;
int err;
+
+ ttc = &priv->fs.ttc;
+ ft = ttc->ft.t;
+ rules = ttc->rules;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
+ if (tt == MLX5E_TT_ANY)
+ dest.tir_num = priv->direct_tir[0].tirn;
+ else
+ dest.tir_num = priv->indir_tir[tt].tirn;
+ rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
+ ttc_rules[tt].etype,
+ ttc_rules[tt].proto);
+ if (IS_ERR(rules[tt]))
+ goto del_rules;
+ }
+
+ return 0;
+
+del_rules:
+ err = PTR_ERR(rules[tt]);
+ rules[tt] = NULL;
+ mlx5e_cleanup_ttc_rules(ttc);
+ return err;
+}
+
+#define MLX5E_TTC_NUM_GROUPS 3
+#define MLX5E_TTC_GROUP1_SIZE BIT(3)
+#define MLX5E_TTC_GROUP2_SIZE BIT(1)
+#define MLX5E_TTC_GROUP3_SIZE BIT(0)
+#define MLX5E_TTC_TABLE_SIZE (MLX5E_TTC_GROUP1_SIZE +\
+ MLX5E_TTC_GROUP2_SIZE +\
+ MLX5E_TTC_GROUP3_SIZE)
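The group split mirrors the three rule shapes exactly: BIT(3) = 8 entries for the etype+ip_protocol rules (TCP/UDP/AH/ESP, each for IPv4 and IPv6), BIT(1) = 2 for the etype-only IPv4/IPv6 rules, and BIT(0) = 1 for the match-nothing catch-all:

    /* MLX5E_TTC_TABLE_SIZE = 8 + 2 + 1 = 11, one entry per MLX5E_TT_* type */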
+static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5e_flow_table *ft = &ttc->ft;
int ix = 0;
+ u32 *in;
+ int err;
+ u8 *mc;
- memset(in, 0, inlen);
- MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
- MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5E_MAIN_GROUP0_SIZE;
- MLX5_SET_CFG(in, end_flow_index, ix - 1);
- ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
- if (IS_ERR(ft->g[ft->num_groups]))
- goto err_destroy_groups;
- ft->num_groups++;
+ ft->g = kcalloc(MLX5E_TTC_NUM_GROUPS,
+ sizeof(*ft->g), GFP_KERNEL);
+ if (!ft->g)
+ return -ENOMEM;
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ kfree(ft->g);
+ return -ENOMEM;
+ }
- memset(in, 0, inlen);
- MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ /* L4 Group */
+ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5E_MAIN_GROUP1_SIZE;
+ ix += MLX5E_TTC_GROUP1_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
- goto err_destroy_groups;
+ goto err;
ft->num_groups++;
- memset(in, 0, inlen);
+ /* L3 Group */
+ MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5E_MAIN_GROUP2_SIZE;
+ ix += MLX5E_TTC_GROUP2_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
- goto err_destroy_groups;
+ goto err;
ft->num_groups++;
+ /* Any Group */
memset(in, 0, inlen);
- MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
- eth_broadcast_addr(dmac);
MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5E_MAIN_GROUP3_SIZE;
+ ix += MLX5E_TTC_GROUP3_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
- goto err_destroy_groups;
+ goto err;
ft->num_groups++;
- memset(in, 0, inlen);
- MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
- eth_broadcast_addr(dmac);
- MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5E_MAIN_GROUP4_SIZE;
- MLX5_SET_CFG(in, end_flow_index, ix - 1);
- ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
- if (IS_ERR(ft->g[ft->num_groups]))
- goto err_destroy_groups;
- ft->num_groups++;
+ kvfree(in);
+ return 0;
- memset(in, 0, inlen);
- MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- eth_broadcast_addr(dmac);
- MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5E_MAIN_GROUP5_SIZE;
- MLX5_SET_CFG(in, end_flow_index, ix - 1);
- ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
- if (IS_ERR(ft->g[ft->num_groups]))
- goto err_destroy_groups;
- ft->num_groups++;
+err:
+ err = PTR_ERR(ft->g[ft->num_groups]);
+ ft->g[ft->num_groups] = NULL;
+ kvfree(in);
- memset(in, 0, inlen);
- MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
- dmac[0] = 0x01;
+ return err;
+}
+
+static void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
+{
+ struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
+
+ mlx5e_cleanup_ttc_rules(ttc);
+ mlx5e_destroy_flow_table(&ttc->ft);
+}
+
+static int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
+{
+ struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
+ struct mlx5e_flow_table *ft = &ttc->ft;
+ int err;
+
+ ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
+ MLX5E_TTC_TABLE_SIZE, MLX5E_TTC_FT_LEVEL);
+ if (IS_ERR(ft->t)) {
+ err = PTR_ERR(ft->t);
+ ft->t = NULL;
+ return err;
+ }
+
+ err = mlx5e_create_ttc_table_groups(ttc);
+ if (err)
+ goto err;
+
+ err = mlx5e_generate_ttc_table_rules(priv);
+ if (err)
+ goto err;
+
+ return 0;
+err:
+ mlx5e_destroy_flow_table(ft);
+ return err;
+}
+
+static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
+ struct mlx5e_l2_rule *ai)
+{
+ if (!IS_ERR_OR_NULL(ai->rule)) {
+ mlx5_del_flow_rule(ai->rule);
+ ai->rule = NULL;
+ }
+}
+
+static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
+ struct mlx5e_l2_rule *ai, int type)
+{
+ struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
+ struct mlx5_flow_destination dest;
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+ u8 *mc_dmac;
+ u8 *mv_dmac;
+
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec) {
+ netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+ return -ENOMEM;
+ }
+
+ mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers.dmac_47_16);
+ mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers.dmac_47_16);
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = priv->fs.ttc.ft.t;
+
+ switch (type) {
+ case MLX5E_FULLMATCH:
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ eth_broadcast_addr(mc_dmac);
+ ether_addr_copy(mv_dmac, ai->addr);
+ break;
+
+ case MLX5E_ALLMULTI:
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ mc_dmac[0] = 0x01;
+ mv_dmac[0] = 0x01;
+ break;
+
+ case MLX5E_PROMISC:
+ break;
+ }
+
+ ai->rule = mlx5_add_flow_rule(ft, spec,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ MLX5_FS_DEFAULT_FLOW_TAG, &dest);
+ if (IS_ERR(ai->rule)) {
+ netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
+ __func__, mv_dmac);
+ err = PTR_ERR(ai->rule);
+ ai->rule = NULL;
+ }
+
+ kvfree(spec);
+
+ return err;
+}
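This is the heart of the refactor: where the removed __mlx5e_add_eth_addr_rule() fanned each MAC address out into up to eleven per-traffic-type rules (one TIR each), an address now costs a single rule whose destination is the shared TTC table, which performs the traffic-type demux once:

    /*
     * old:  dmac ──> up to 11 rules ──> TIR[tt]      (per address)
     * new:  dmac ──> 1 rule ──> TTC ft ──> TIR[tt]   (TTC shared by all)
     */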
+
+#define MLX5E_NUM_L2_GROUPS 3
+#define MLX5E_L2_GROUP1_SIZE BIT(0)
+#define MLX5E_L2_GROUP2_SIZE BIT(15)
+#define MLX5E_L2_GROUP3_SIZE BIT(0)
+#define MLX5E_L2_TABLE_SIZE (MLX5E_L2_GROUP1_SIZE +\
+ MLX5E_L2_GROUP2_SIZE +\
+ MLX5E_L2_GROUP3_SIZE)
+static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5e_flow_table *ft = &l2_table->ft;
+ int ix = 0;
+ u8 *mc_dmac;
+ u32 *in;
+ int err;
+ u8 *mc;
+
+ ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL);
+ if (!ft->g)
+ return -ENOMEM;
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ kfree(ft->g);
+ return -ENOMEM;
+ }
+
+ mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+ mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
+ outer_headers.dmac_47_16);
+ /* Flow Group for promiscuous */
MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5E_MAIN_GROUP6_SIZE;
+ ix += MLX5E_L2_GROUP1_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
goto err_destroy_groups;
ft->num_groups++;
- memset(in, 0, inlen);
+ /* Flow Group for full match */
+ eth_broadcast_addr(mc_dmac);
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
- dmac[0] = 0x01;
MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5E_MAIN_GROUP7_SIZE;
+ ix += MLX5E_L2_GROUP2_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
goto err_destroy_groups;
ft->num_groups++;
- memset(in, 0, inlen);
- MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- dmac[0] = 0x01;
+ /* Flow Group for allmulti */
+ eth_zero_addr(mc_dmac);
+ mc_dmac[0] = 0x01;
MLX5_SET_CFG(in, start_flow_index, ix);
- ix += MLX5E_MAIN_GROUP8_SIZE;
+ ix += MLX5E_L2_GROUP3_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
goto err_destroy_groups;
ft->num_groups++;
+ kvfree(in);
return 0;
err_destroy_groups:
err = PTR_ERR(ft->g[ft->num_groups]);
ft->g[ft->num_groups] = NULL;
mlx5e_destroy_groups(ft);
+ kvfree(in);
return err;
}
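Group sizing again encodes the rule shapes from mlx5e_add_l2_flow_rule(): one promiscuous entry with empty match criteria, BIT(15) = 32768 full-DMAC entries backing the unicast/multicast hash tables, and one allmulti entry. Note the eth_zero_addr(mc_dmac) before the third group: 'in' is reused across groups, so the broadcast-address criteria left over from the full-match group must be cleared down to the single bit that matters:

    /* resulting allmulti-group criteria: dmac mask 01:00:00:00:00:00,
     * i.e. classify on the multicast (I/G) bit alone */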
-static int mlx5e_create_main_groups(struct mlx5e_flow_table *ft)
+static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
{
- u32 *in;
- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- int err;
-
- in = mlx5_vzalloc(inlen);
- if (!in)
- return -ENOMEM;
-
- err = __mlx5e_create_main_groups(ft, in, inlen);
-
- kvfree(in);
- return err;
+ mlx5e_destroy_flow_table(&priv->fs.l2.ft);
}
-static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
+static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
{
- struct mlx5e_flow_table *ft = &priv->fts.main;
+ struct mlx5e_l2_table *l2_table = &priv->fs.l2;
+ struct mlx5e_flow_table *ft = &l2_table->ft;
int err;
ft->num_groups = 0;
- ft->t = mlx5_create_flow_table(priv->fts.ns, 1, MLX5E_MAIN_TABLE_SIZE);
+ ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
+ MLX5E_L2_TABLE_SIZE, MLX5E_L2_FT_LEVEL);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
return err;
}
- ft->g = kcalloc(MLX5E_NUM_MAIN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
- if (!ft->g) {
- err = -ENOMEM;
- goto err_destroy_main_flow_table;
- }
- err = mlx5e_create_main_groups(ft);
+ err = mlx5e_create_l2_table_groups(l2_table);
if (err)
- goto err_free_g;
- return 0;
+ goto err_destroy_flow_table;
-err_free_g:
- kfree(ft->g);
+ return 0;
-err_destroy_main_flow_table:
+err_destroy_flow_table:
mlx5_destroy_flow_table(ft->t);
ft->t = NULL;
return err;
}
-static void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
-{
- mlx5e_destroy_groups(ft);
- kfree(ft->g);
- mlx5_destroy_flow_table(ft->t);
- ft->t = NULL;
-}
-
-static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
-{
- mlx5e_destroy_flow_table(&priv->fts.main);
-}
-
#define MLX5E_NUM_VLAN_GROUPS 2
#define MLX5E_VLAN_GROUP0_SIZE BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE BIT(1)
#define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\
MLX5E_VLAN_GROUP1_SIZE)
-static int __mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft, u32 *in,
- int inlen)
+static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
+ int inlen)
{
int err;
int ix = 0;
@@ -1128,7 +984,7 @@ err_destroy_groups:
return err;
}
-static int mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft)
+static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
{
u32 *in;
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
@@ -1138,19 +994,20 @@ static int mlx5e_create_vlan_groups(struct mlx5e_flow_table *ft)
if (!in)
return -ENOMEM;
- err = __mlx5e_create_vlan_groups(ft, in, inlen);
+ err = __mlx5e_create_vlan_table_groups(ft, in, inlen);
kvfree(in);
return err;
}
-static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
+static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
{
- struct mlx5e_flow_table *ft = &priv->fts.vlan;
+ struct mlx5e_flow_table *ft = &priv->fs.vlan.ft;
int err;
ft->num_groups = 0;
- ft->t = mlx5_create_flow_table(priv->fts.ns, 1, MLX5E_VLAN_TABLE_SIZE);
+ ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
+ MLX5E_VLAN_TABLE_SIZE, MLX5E_VLAN_FT_LEVEL);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
@@ -1160,65 +1017,93 @@ static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
if (!ft->g) {
err = -ENOMEM;
- goto err_destroy_vlan_flow_table;
+ goto err_destroy_vlan_table;
}
- err = mlx5e_create_vlan_groups(ft);
+ err = mlx5e_create_vlan_table_groups(ft);
if (err)
goto err_free_g;
+ err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+ if (err)
+ goto err_destroy_vlan_flow_groups;
+
return 0;
+err_destroy_vlan_flow_groups:
+ mlx5e_destroy_groups(ft);
err_free_g:
kfree(ft->g);
-
-err_destroy_vlan_flow_table:
+err_destroy_vlan_table:
mlx5_destroy_flow_table(ft->t);
ft->t = NULL;
return err;
}
-static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
+static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
{
- mlx5e_destroy_flow_table(&priv->fts.vlan);
+ mlx5e_destroy_flow_table(&priv->fs.vlan.ft);
}
-int mlx5e_create_flow_tables(struct mlx5e_priv *priv)
+int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
{
int err;
- priv->fts.ns = mlx5_get_flow_namespace(priv->mdev,
+ priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
- if (!priv->fts.ns)
+ if (!priv->fs.ns)
return -EINVAL;
- err = mlx5e_create_vlan_flow_table(priv);
- if (err)
- return err;
+ err = mlx5e_arfs_create_tables(priv);
+ if (err) {
+ netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
+ err);
+ priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
+ }
- err = mlx5e_create_main_flow_table(priv);
- if (err)
- goto err_destroy_vlan_flow_table;
+ err = mlx5e_create_ttc_table(priv);
+ if (err) {
+ netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
+ err);
+ goto err_destroy_arfs_tables;
+ }
- err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
- if (err)
- goto err_destroy_main_flow_table;
+ err = mlx5e_create_l2_table(priv);
+ if (err) {
+ netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n",
+ err);
+ goto err_destroy_ttc_table;
+ }
+
+ err = mlx5e_create_vlan_table(priv);
+ if (err) {
+ netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n",
+ err);
+ goto err_destroy_l2_table;
+ }
+
+ mlx5e_ethtool_init_steering(priv);
return 0;
-err_destroy_main_flow_table:
- mlx5e_destroy_main_flow_table(priv);
-err_destroy_vlan_flow_table:
- mlx5e_destroy_vlan_flow_table(priv);
+err_destroy_l2_table:
+ mlx5e_destroy_l2_table(priv);
+err_destroy_ttc_table:
+ mlx5e_destroy_ttc_table(priv);
+err_destroy_arfs_tables:
+ mlx5e_arfs_destroy_tables(priv);
return err;
}
-void mlx5e_destroy_flow_tables(struct mlx5e_priv *priv)
+void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
{
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
- mlx5e_destroy_main_flow_table(priv);
- mlx5e_destroy_vlan_flow_table(priv);
+ mlx5e_destroy_vlan_table(priv);
+ mlx5e_destroy_l2_table(priv);
+ mlx5e_destroy_ttc_table(priv);
+ mlx5e_arfs_destroy_tables(priv);
+ mlx5e_ethtool_cleanup_steering(priv);
}
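Taken together, mlx5e_create_flow_steering() builds the RX pipeline destination-first, so each table exists before any rule points at it, and teardown runs in reverse. The resulting packet path, sketched (the aRFS leg applies only with NETIF_F_NTUPLE; repointing the TCP/UDP TTC rules toward the aRFS tables is handled in en_arfs.c, outside this hunk):

    /*
     * RX ──> VLAN ft ──> L2 ft ──> TTC ft ──┬──> TIRs (RSS / direct)
     *                                       └──> aRFS fts ──> TIRs
     */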
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
new file mode 100644
index 000000000000..d17c24227900
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -0,0 +1,586 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/fs.h>
+#include "en.h"
+
+struct mlx5e_ethtool_rule {
+ struct list_head list;
+ struct ethtool_rx_flow_spec flow_spec;
+ struct mlx5_flow_rule *rule;
+ struct mlx5e_ethtool_table *eth_ft;
+};
+
+static void put_flow_table(struct mlx5e_ethtool_table *eth_ft)
+{
+ if (!--eth_ft->num_rules) {
+ mlx5_destroy_flow_table(eth_ft->ft);
+ eth_ft->ft = NULL;
+ }
+}
+
+#define MLX5E_ETHTOOL_L3_L4_PRIO 0
+#define MLX5E_ETHTOOL_L2_PRIO (MLX5E_ETHTOOL_L3_L4_PRIO + ETHTOOL_NUM_L3_L4_FTS)
+#define MLX5E_ETHTOOL_NUM_ENTRIES 64000
+#define MLX5E_ETHTOOL_NUM_GROUPS 10
+static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
+ struct ethtool_rx_flow_spec *fs,
+ int num_tuples)
+{
+ struct mlx5e_ethtool_table *eth_ft;
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_table *ft;
+ int max_tuples;
+ int table_size;
+ int prio;
+
+ switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ case IP_USER_FLOW:
+ max_tuples = ETHTOOL_NUM_L3_L4_FTS;
+ prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
+ eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
+ break;
+ case ETHER_FLOW:
+ max_tuples = ETHTOOL_NUM_L2_FTS;
+ prio = max_tuples - num_tuples;
+ eth_ft = &priv->fs.ethtool.l2_ft[prio];
+ prio += MLX5E_ETHTOOL_L2_PRIO;
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ eth_ft->num_rules++;
+ if (eth_ft->ft)
+ return eth_ft;
+
+ ns = mlx5_get_flow_namespace(priv->mdev,
+ MLX5_FLOW_NAMESPACE_ETHTOOL);
+ if (!ns)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ table_size = min_t(u32, BIT(MLX5_CAP_FLOWTABLE(priv->mdev,
+ flow_table_properties_nic_receive.log_max_ft_size)),
+ MLX5E_ETHTOOL_NUM_ENTRIES);
+ ft = mlx5_create_auto_grouped_flow_table(ns, prio,
+ table_size,
+ MLX5E_ETHTOOL_NUM_GROUPS, 0);
+ if (IS_ERR(ft))
+ return (void *)ft;
+
+ eth_ft->ft = ft;
+ return eth_ft;
+}
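The priority arithmetic in get_flow_table() orders tables by rule specificity: more match tuples yields a numerically lower prio, so more specific rules live in tables the hardware walks first, and every distinct tuple count gets its own lazily created, auto-grouped table. A worked example, assuming the ETHTOOL_NUM_L3_L4_FTS bound from en.h:

    /* A TCP4 rule matching ip4dst + pdst, plus the implicit TCP/UDP tuple,
     * gives num_tuples = 3:
     *     prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - 3)
     * A 4-tuple rule lands at prio - 1 and is consulted before it. */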
+
+static void mask_spec(u8 *mask, u8 *val, size_t size)
+{
+ unsigned int i;
+
+ for (i = 0; i < size; i++, mask++, val++)
+ *val = *mask & *val;
+}
+
+static void set_ips(void *outer_headers_v, void *outer_headers_c, __be32 ip4src_m,
+ __be32 ip4src_v, __be32 ip4dst_m, __be32 ip4dst_v)
+{
+ if (ip4src_m) {
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4),
+ &ip4src_v, sizeof(ip4src_v));
+ memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+ src_ipv4_src_ipv6.ipv4_layout.ipv4),
+ 0xff, sizeof(ip4src_m));
+ }
+ if (ip4dst_m) {
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+ &ip4dst_v, sizeof(ip4dst_v));
+ memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
+ dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
+ 0xff, sizeof(ip4dst_m));
+ }
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+ ethertype, ETH_P_IP);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+ ethertype, 0xffff);
+}
+
+static int set_flow_attrs(u32 *match_c, u32 *match_v,
+ struct ethtool_rx_flow_spec *fs)
+{
+ void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
+ outer_headers);
+ void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
+ outer_headers);
+ u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
+ struct ethtool_tcpip4_spec *l4_mask;
+ struct ethtool_tcpip4_spec *l4_val;
+ struct ethtool_usrip4_spec *l3_mask;
+ struct ethtool_usrip4_spec *l3_val;
+ struct ethhdr *eth_val;
+ struct ethhdr *eth_mask;
+
+ switch (flow_type) {
+ case TCP_V4_FLOW:
+ l4_mask = &fs->m_u.tcp_ip4_spec;
+ l4_val = &fs->h_u.tcp_ip4_spec;
+ set_ips(outer_headers_v, outer_headers_c, l4_mask->ip4src,
+ l4_val->ip4src, l4_mask->ip4dst, l4_val->ip4dst);
+
+ if (l4_mask->psrc) {
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport,
+ 0xffff);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_sport,
+ ntohs(l4_val->psrc));
+ }
+ if (l4_mask->pdst) {
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport,
+ 0xffff);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_dport,
+ ntohs(l4_val->pdst));
+ }
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
+ 0xffff);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
+ IPPROTO_TCP);
+ break;
+ case UDP_V4_FLOW:
+ l4_mask = &fs->m_u.tcp_ip4_spec; /* udp_ip4_spec shares struct ethtool_tcpip4_spec */
+ l4_val = &fs->h_u.tcp_ip4_spec;
+ set_ips(outer_headers_v, outer_headers_c, l4_mask->ip4src,
+ l4_val->ip4src, l4_mask->ip4dst, l4_val->ip4dst);
+
+ if (l4_mask->psrc) {
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_sport,
+ 0xffff);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_sport,
+ ntohs(l4_val->psrc));
+ }
+ if (l4_mask->pdst) {
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_dport,
+ 0xffff);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_dport,
+ ntohs(l4_val->pdst));
+ }
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
+ 0xffff);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
+ IPPROTO_UDP);
+ break;
+ case IP_USER_FLOW:
+ l3_mask = &fs->m_u.usr_ip4_spec;
+ l3_val = &fs->h_u.usr_ip4_spec;
+ set_ips(outer_headers_v, outer_headers_c, l3_mask->ip4src,
+ l3_val->ip4src, l3_mask->ip4dst, l3_val->ip4dst);
+ break;
+ case ETHER_FLOW:
+ eth_mask = &fs->m_u.ether_spec;
+ eth_val = &fs->h_u.ether_spec;
+
+ mask_spec((u8 *)eth_mask, (u8 *)eth_val, sizeof(*eth_mask));
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
+ outer_headers_c, smac_47_16),
+ eth_mask->h_source);
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
+ outer_headers_v, smac_47_16),
+ eth_val->h_source);
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
+ outer_headers_c, dmac_47_16),
+ eth_mask->h_dest);
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
+ outer_headers_v, dmac_47_16),
+ eth_val->h_dest);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ethertype,
+ ntohs(eth_mask->h_proto));
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ethertype,
+ ntohs(eth_val->h_proto));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if ((fs->flow_type & FLOW_EXT) &&
+ (fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) {
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+ vlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+ vlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
+ first_vid, 0xfff);
+ MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
+ first_vid, ntohs(fs->h_ext.vlan_tci));
+ }
+ if (fs->flow_type & FLOW_MAC_EXT &&
+ !is_zero_ether_addr(fs->m_ext.h_dest)) {
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
+ outer_headers_c, dmac_47_16),
+ fs->m_ext.h_dest);
+ ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
+ outer_headers_v, dmac_47_16),
+ fs->h_ext.h_dest);
+ }
+
+ return 0;
+}
+
+static void add_rule_to_list(struct mlx5e_priv *priv,
+ struct mlx5e_ethtool_rule *rule)
+{
+ struct mlx5e_ethtool_rule *iter;
+ struct list_head *head = &priv->fs.ethtool.rules;
+
+ list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
+ if (iter->flow_spec.location > rule->flow_spec.location)
+ break;
+ head = &iter->list;
+ }
+ priv->fs.ethtool.tot_num_rules++;
+ list_add(&rule->list, head);
+}
+
+static bool outer_header_zero(u32 *match_criteria)
+{
+ int size = MLX5_FLD_SZ_BYTES(fte_match_param, outer_headers);
+ char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ outer_headers);
+
+ return outer_headers_c[0] == 0 && !memcmp(outer_headers_c,
+ outer_headers_c + 1,
+ size - 1);
+}
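outer_header_zero() uses the overlapping-memcmp idiom to test a buffer for all-zero without an explicit loop: if byte 0 is zero and memcmp(p, p + 1, n - 1) says every byte equals its successor, all n bytes are zero. An equivalent spelled-out sketch:

    static bool buf_is_zero(const u8 *p, size_t n)
    {
        size_t i;

        for (i = 1; i < n; i++)
            if (p[i] != p[i - 1])
                return false;
        return p[0] == 0;
    }

The caller below turns the result straight into match_criteria_enable, which works because MLX5_MATCH_OUTER_HEADERS is bit 0 (1 << 0): a rule whose outer-header criteria are all zero is installed as match-anything.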
+
+static struct mlx5_flow_rule *add_ethtool_flow_rule(struct mlx5e_priv *priv,
+ struct mlx5_flow_table *ft,
+ struct ethtool_rx_flow_spec *fs)
+{
+ struct mlx5_flow_destination *dst = NULL;
+ struct mlx5_flow_spec *spec;
+ struct mlx5_flow_rule *rule;
+ int err = 0;
+ u32 action;
+
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec)
+ return ERR_PTR(-ENOMEM);
+ err = set_flow_attrs(spec->match_criteria, spec->match_value,
+ fs);
+ if (err)
+ goto free;
+
+ if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
+ action = MLX5_FLOW_CONTEXT_ACTION_DROP;
+ } else {
+ dst = kzalloc(sizeof(*dst), GFP_KERNEL);
+ if (!dst) {
+ err = -ENOMEM;
+ goto free;
+ }
+
+ dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ dst->tir_num = priv->direct_tir[fs->ring_cookie].tirn;
+ action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ }
+
+ spec->match_criteria_enable = (!outer_header_zero(spec->match_criteria));
+ rule = mlx5_add_flow_rule(ft, spec, action,
+ MLX5_FS_DEFAULT_FLOW_TAG, dst);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ netdev_err(priv->netdev, "%s: failed to add ethtool steering rule: %d\n",
+ __func__, err);
+ goto free;
+ }
+free:
+ kvfree(spec);
+ kfree(dst);
+ return err ? ERR_PTR(err) : rule;
+}
+
+static void del_ethtool_rule(struct mlx5e_priv *priv,
+ struct mlx5e_ethtool_rule *eth_rule)
+{
+ if (eth_rule->rule)
+ mlx5_del_flow_rule(eth_rule->rule);
+ list_del(&eth_rule->list);
+ priv->fs.ethtool.tot_num_rules--;
+ put_flow_table(eth_rule->eth_ft);
+ kfree(eth_rule);
+}
+
+static struct mlx5e_ethtool_rule *find_ethtool_rule(struct mlx5e_priv *priv,
+ int location)
+{
+ struct mlx5e_ethtool_rule *iter;
+
+ list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
+ if (iter->flow_spec.location == location)
+ return iter;
+ }
+ return NULL;
+}
+
+static struct mlx5e_ethtool_rule *get_ethtool_rule(struct mlx5e_priv *priv,
+ int location)
+{
+ struct mlx5e_ethtool_rule *eth_rule;
+
+ eth_rule = find_ethtool_rule(priv, location);
+ if (eth_rule)
+ del_ethtool_rule(priv, eth_rule);
+
+ eth_rule = kzalloc(sizeof(*eth_rule), GFP_KERNEL);
+ if (!eth_rule)
+ return ERR_PTR(-ENOMEM);
+
+ add_rule_to_list(priv, eth_rule);
+ return eth_rule;
+}
+
+#define MAX_NUM_OF_ETHTOOL_RULES BIT(10)
+
+#define all_ones(field) ((field) == (__force typeof(field))-1)
+#define all_zeros_or_all_ones(field) \
+ ((field) == 0 || (field) == (__force typeof(field))-1)
+
+static int validate_flow(struct mlx5e_priv *priv,
+ struct ethtool_rx_flow_spec *fs)
+{
+ struct ethtool_tcpip4_spec *l4_mask;
+ struct ethtool_usrip4_spec *l3_mask;
+ struct ethhdr *eth_mask;
+ int num_tuples = 0;
+
+ if (fs->location >= MAX_NUM_OF_ETHTOOL_RULES)
+ return -EINVAL;
+
+ if (fs->ring_cookie >= priv->params.num_channels &&
+ fs->ring_cookie != RX_CLS_FLOW_DISC)
+ return -EINVAL;
+
+ switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+ case ETHER_FLOW:
+ eth_mask = &fs->m_u.ether_spec;
+ if (!is_zero_ether_addr(eth_mask->h_dest))
+ num_tuples++;
+ if (!is_zero_ether_addr(eth_mask->h_source))
+ num_tuples++;
+ if (eth_mask->h_proto)
+ num_tuples++;
+ break;
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ if (fs->m_u.tcp_ip4_spec.tos)
+ return -EINVAL;
+ l4_mask = &fs->m_u.tcp_ip4_spec;
+ if (l4_mask->ip4src) {
+ if (!all_ones(l4_mask->ip4src))
+ return -EINVAL;
+ num_tuples++;
+ }
+ if (l4_mask->ip4dst) {
+ if (!all_ones(l4_mask->ip4dst))
+ return -EINVAL;
+ num_tuples++;
+ }
+ if (l4_mask->psrc) {
+ if (!all_ones(l4_mask->psrc))
+ return -EINVAL;
+ num_tuples++;
+ }
+ if (l4_mask->pdst) {
+ if (!all_ones(l4_mask->pdst))
+ return -EINVAL;
+ num_tuples++;
+ }
+ /* Flow is TCP/UDP */
+ num_tuples++;
+ break;
+ case IP_USER_FLOW:
+ l3_mask = &fs->m_u.usr_ip4_spec;
+ if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto ||
+ fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
+ return -EINVAL;
+ if (l3_mask->ip4src) {
+ if (!all_ones(l3_mask->ip4src))
+ return -EINVAL;
+ num_tuples++;
+ }
+ if (l3_mask->ip4dst) {
+ if (!all_ones(l3_mask->ip4dst))
+ return -EINVAL;
+ num_tuples++;
+ }
+ /* Flow is IPv4 */
+ num_tuples++;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if ((fs->flow_type & FLOW_EXT)) {
+ if (fs->m_ext.vlan_etype ||
+ (fs->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK)))
+ return -EINVAL;
+
+ if (fs->m_ext.vlan_tci) {
+ if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
+ return -EINVAL;
+ }
+ num_tuples++;
+ }
+
+ if (fs->flow_type & FLOW_MAC_EXT &&
+ !is_zero_ether_addr(fs->m_ext.h_dest))
+ num_tuples++;
+
+ return num_tuples;
+}
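Besides counting tuples for the table-priority math, validate_flow() enforces the device's exact-match contract: every populated L3/L4 field must be fully masked (all_ones), because the ethtool flow tables steer on exact values only. For example:

    fs.m_u.tcp_ip4_spec.pdst   = 0xffff;            /* accepted: full mask */
    fs.m_u.tcp_ip4_spec.ip4src = htonl(0xffffff00); /* rejected: -EINVAL   */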
+
+int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
+ struct ethtool_rx_flow_spec *fs)
+{
+ struct mlx5e_ethtool_table *eth_ft;
+ struct mlx5e_ethtool_rule *eth_rule;
+ struct mlx5_flow_rule *rule;
+ int num_tuples;
+ int err;
+
+ num_tuples = validate_flow(priv, fs);
+ if (num_tuples <= 0) {
+ netdev_warn(priv->netdev, "%s: flow is not valid\n", __func__);
+ return -EINVAL;
+ }
+
+ eth_ft = get_flow_table(priv, fs, num_tuples);
+ if (IS_ERR(eth_ft))
+ return PTR_ERR(eth_ft);
+
+ eth_rule = get_ethtool_rule(priv, fs->location);
+ if (IS_ERR(eth_rule)) {
+ put_flow_table(eth_ft);
+ return PTR_ERR(eth_rule);
+ }
+
+ eth_rule->flow_spec = *fs;
+ eth_rule->eth_ft = eth_ft;
+ if (!eth_ft->ft) {
+ err = -EINVAL;
+ goto del_ethtool_rule;
+ }
+ rule = add_ethtool_flow_rule(priv, eth_ft->ft, fs);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ goto del_ethtool_rule;
+ }
+
+ eth_rule->rule = rule;
+
+ return 0;
+
+del_ethtool_rule:
+ del_ethtool_rule(priv, eth_rule);
+
+ return err;
+}
+
+int mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv,
+ int location)
+{
+ struct mlx5e_ethtool_rule *eth_rule;
+ int err = 0;
+
+ if (location >= MAX_NUM_OF_ETHTOOL_RULES)
+ return -ENOSPC;
+
+ eth_rule = find_ethtool_rule(priv, location);
+ if (!eth_rule) {
+ err = -ENOENT;
+ goto out;
+ }
+
+ del_ethtool_rule(priv, eth_rule);
+out:
+ return err;
+}
+
+int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
+ int location)
+{
+ struct mlx5e_ethtool_rule *eth_rule;
+
+ if (location < 0 || location >= MAX_NUM_OF_ETHTOOL_RULES)
+ return -EINVAL;
+
+ list_for_each_entry(eth_rule, &priv->fs.ethtool.rules, list) {
+ if (eth_rule->flow_spec.location == location) {
+ info->fs = eth_rule->flow_spec;
+ return 0;
+ }
+ }
+
+ return -ENOENT;
+}
+
+int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
+ u32 *rule_locs)
+{
+ int location = 0;
+ int idx = 0;
+ int err = 0;
+
+ while ((!err || err == -ENOENT) && idx < info->rule_cnt) {
+ err = mlx5e_ethtool_get_flow(priv, info, location);
+ if (!err)
+ rule_locs[idx++] = location;
+ location++;
+ }
+ return err;
+}
+
+void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv)
+{
+ struct mlx5e_ethtool_rule *iter;
+ struct mlx5e_ethtool_rule *temp;
+
+ list_for_each_entry_safe(iter, temp, &priv->fs.ethtool.rules, list)
+ del_ethtool_rule(priv, iter);
+}
+
+void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv)
+{
+ INIT_LIST_HEAD(&priv->fs.ethtool.rules);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 94fef705890b..2459c7f3db8d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -40,27 +40,33 @@
#include "vxlan.h"
struct mlx5e_rq_param {
- u32 rqc[MLX5_ST_SZ_DW(rqc)];
- struct mlx5_wq_param wq;
+ u32 rqc[MLX5_ST_SZ_DW(rqc)];
+ struct mlx5_wq_param wq;
+ bool am_enabled;
};
struct mlx5e_sq_param {
u32 sqc[MLX5_ST_SZ_DW(sqc)];
struct mlx5_wq_param wq;
u16 max_inline;
+ u8 min_inline_mode;
+ bool icosq;
};
struct mlx5e_cq_param {
u32 cqc[MLX5_ST_SZ_DW(cqc)];
struct mlx5_wq_param wq;
u16 eq_ix;
+ u8 cq_period_mode;
};
struct mlx5e_channel_param {
struct mlx5e_rq_param rq;
struct mlx5e_sq_param sq;
+ struct mlx5e_sq_param icosq;
struct mlx5e_cq_param rx_cq;
struct mlx5e_cq_param tx_cq;
+ struct mlx5e_cq_param icosq_cq;
};
static void mlx5e_update_carrier(struct mlx5e_priv *priv)
@@ -71,10 +77,13 @@ static void mlx5e_update_carrier(struct mlx5e_priv *priv)
port_state = mlx5_query_vport_state(mdev,
MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);
- if (port_state == VPORT_STATE_UP)
+ if (port_state == VPORT_STATE_UP) {
+ netdev_info(priv->netdev, "Link up\n");
netif_carrier_on(priv->netdev);
- else
+ } else {
+ netdev_info(priv->netdev, "Link down\n");
netif_carrier_off(priv->netdev);
+ }
}
static void mlx5e_update_carrier_work(struct work_struct *work)
@@ -88,111 +97,86 @@ static void mlx5e_update_carrier_work(struct work_struct *work)
mutex_unlock(&priv->state_lock);
}
-static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
+static void mlx5e_tx_timeout_work(struct work_struct *work)
{
- struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5e_pport_stats *s = &priv->stats.pport;
- u32 *in;
- u32 *out;
- int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
-
- in = mlx5_vzalloc(sz);
- out = mlx5_vzalloc(sz);
- if (!in || !out)
- goto free_out;
-
- MLX5_SET(ppcnt_reg, in, local_port, 1);
-
- MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out,
- sz, MLX5_REG_PPCNT, 0, 0);
- memcpy(s->IEEE_802_3_counters,
- MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
- sizeof(s->IEEE_802_3_counters));
-
- MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out,
- sz, MLX5_REG_PPCNT, 0, 0);
- memcpy(s->RFC_2863_counters,
- MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
- sizeof(s->RFC_2863_counters));
-
- MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
- mlx5_core_access_reg(mdev, in, sz, out,
- sz, MLX5_REG_PPCNT, 0, 0);
- memcpy(s->RFC_2819_counters,
- MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
- sizeof(s->RFC_2819_counters));
+ struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+ tx_timeout_work);
+ int err;
-free_out:
- kvfree(in);
- kvfree(out);
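+ /* Recover from a TX timeout by closing and reopening the channels */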
+ rtnl_lock();
+ mutex_lock(&priv->state_lock);
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto unlock;
+ mlx5e_close_locked(priv->netdev);
+ err = mlx5e_open_locked(priv->netdev);
+ if (err)
+ netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
+ err);
+unlock:
+ mutex_unlock(&priv->state_lock);
+ rtnl_unlock();
}
-void mlx5e_update_stats(struct mlx5e_priv *priv)
+static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
{
- struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5e_vport_stats *s = &priv->stats.vport;
+ struct mlx5e_sw_stats *s = &priv->stats.sw;
struct mlx5e_rq_stats *rq_stats;
struct mlx5e_sq_stats *sq_stats;
- u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
- u32 *out;
- int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
- u64 tx_offload_none;
+ u64 tx_offload_none = 0;
int i, j;
- out = mlx5_vzalloc(outlen);
- if (!out)
- return;
-
- /* Collect firts the SW counters and then HW for consistency */
- s->rx_packets = 0;
- s->rx_bytes = 0;
- s->tx_packets = 0;
- s->tx_bytes = 0;
- s->tso_packets = 0;
- s->tso_bytes = 0;
- s->tso_inner_packets = 0;
- s->tso_inner_bytes = 0;
- s->tx_queue_stopped = 0;
- s->tx_queue_wake = 0;
- s->tx_queue_dropped = 0;
- s->tx_csum_inner = 0;
- tx_offload_none = 0;
- s->lro_packets = 0;
- s->lro_bytes = 0;
- s->rx_csum_none = 0;
- s->rx_csum_sw = 0;
- s->rx_wqe_err = 0;
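+ /* Reset all SW counters before re-accumulating the per-channel stats */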
+ memset(s, 0, sizeof(*s));
for (i = 0; i < priv->params.num_channels; i++) {
rq_stats = &priv->channel[i]->rq.stats;
s->rx_packets += rq_stats->packets;
s->rx_bytes += rq_stats->bytes;
- s->lro_packets += rq_stats->lro_packets;
- s->lro_bytes += rq_stats->lro_bytes;
+ s->rx_lro_packets += rq_stats->lro_packets;
+ s->rx_lro_bytes += rq_stats->lro_bytes;
s->rx_csum_none += rq_stats->csum_none;
- s->rx_csum_sw += rq_stats->csum_sw;
+ s->rx_csum_complete += rq_stats->csum_complete;
+ s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
s->rx_wqe_err += rq_stats->wqe_err;
+ s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
+ s->rx_mpwqe_frag += rq_stats->mpwqe_frag;
+ s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
+ s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
+ s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
for (j = 0; j < priv->params.num_tc; j++) {
sq_stats = &priv->channel[i]->sq[j].stats;
s->tx_packets += sq_stats->packets;
s->tx_bytes += sq_stats->bytes;
- s->tso_packets += sq_stats->tso_packets;
- s->tso_bytes += sq_stats->tso_bytes;
- s->tso_inner_packets += sq_stats->tso_inner_packets;
- s->tso_inner_bytes += sq_stats->tso_inner_bytes;
+ s->tx_tso_packets += sq_stats->tso_packets;
+ s->tx_tso_bytes += sq_stats->tso_bytes;
+ s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
+ s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
s->tx_queue_stopped += sq_stats->stopped;
s->tx_queue_wake += sq_stats->wake;
s->tx_queue_dropped += sq_stats->dropped;
- s->tx_csum_inner += sq_stats->csum_offload_inner;
- tx_offload_none += sq_stats->csum_offload_none;
+ s->tx_xmit_more += sq_stats->xmit_more;
+ s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
+ tx_offload_none += sq_stats->csum_none;
}
}
- /* HW counters */
+ /* Update calculated offload counters */
+ s->tx_csum_partial = s->tx_packets - tx_offload_none - s->tx_csum_partial_inner;
+ s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none - s->rx_csum_complete;
+
+ s->link_down_events_phy = MLX5_GET(ppcnt_reg,
+ priv->stats.pport.phy_counters,
+ counter_set.phys_layer_cntrs.link_down_events);
+}
+
+static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
+{
+ int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
+ u32 *out = (u32 *)priv->stats.vport.query_vport_out;
+ u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
+ struct mlx5_core_dev *mdev = priv->mdev;
+
memset(in, 0, sizeof(in));
MLX5_SET(query_vport_counter_in, in, opcode,
@@ -202,66 +186,79 @@ void mlx5e_update_stats(struct mlx5e_priv *priv)
memset(out, 0, outlen);
- if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen))
+ mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
+}
+
+static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
+ int prio;
+ void *out;
+ u32 *in;
+
+ in = mlx5_vzalloc(sz);
+ if (!in)
goto free_out;
-#define MLX5_GET_CTR(p, x) \
- MLX5_GET64(query_vport_counter_out, p, x)
-
- s->rx_error_packets =
- MLX5_GET_CTR(out, received_errors.packets);
- s->rx_error_bytes =
- MLX5_GET_CTR(out, received_errors.octets);
- s->tx_error_packets =
- MLX5_GET_CTR(out, transmit_errors.packets);
- s->tx_error_bytes =
- MLX5_GET_CTR(out, transmit_errors.octets);
-
- s->rx_unicast_packets =
- MLX5_GET_CTR(out, received_eth_unicast.packets);
- s->rx_unicast_bytes =
- MLX5_GET_CTR(out, received_eth_unicast.octets);
- s->tx_unicast_packets =
- MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
- s->tx_unicast_bytes =
- MLX5_GET_CTR(out, transmitted_eth_unicast.octets);
-
- s->rx_multicast_packets =
- MLX5_GET_CTR(out, received_eth_multicast.packets);
- s->rx_multicast_bytes =
- MLX5_GET_CTR(out, received_eth_multicast.octets);
- s->tx_multicast_packets =
- MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
- s->tx_multicast_bytes =
- MLX5_GET_CTR(out, transmitted_eth_multicast.octets);
-
- s->rx_broadcast_packets =
- MLX5_GET_CTR(out, received_eth_broadcast.packets);
- s->rx_broadcast_bytes =
- MLX5_GET_CTR(out, received_eth_broadcast.octets);
- s->tx_broadcast_packets =
- MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
- s->tx_broadcast_bytes =
- MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
+ MLX5_SET(ppcnt_reg, in, local_port, 1);
+
+ out = pstats->IEEE_802_3_counters;
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
- /* Update calculated offload counters */
- s->tx_csum_offload = s->tx_packets - tx_offload_none - s->tx_csum_inner;
- s->rx_csum_good = s->rx_packets - s->rx_csum_none -
- s->rx_csum_sw;
+ out = pstats->RFC_2863_counters;
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+
+ out = pstats->RFC_2819_counters;
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+
+ out = pstats->phy_counters;
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
+ for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
+ out = pstats->per_prio_counters[prio];
+ MLX5_SET(ppcnt_reg, in, prio_tc, prio);
+ mlx5_core_access_reg(mdev, in, sz, out, sz,
+ MLX5_REG_PPCNT, 0, 0);
+ }
- mlx5e_update_pport_counters(priv);
free_out:
- kvfree(out);
+ kvfree(in);
}
-static void mlx5e_update_stats_work(struct work_struct *work)
+static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
+{
+ struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
+
+ if (!priv->q_counter)
+ return;
+
+ mlx5_core_query_out_of_buffer(priv->mdev, priv->q_counter,
+ &qcnt->rx_out_of_buffer);
+}
+
+void mlx5e_update_stats(struct mlx5e_priv *priv)
+{
+ mlx5e_update_q_counter(priv);
+ mlx5e_update_vport_counters(priv);
+ mlx5e_update_pport_counters(priv);
+ mlx5e_update_sw_counters(priv);
+}
+
+void mlx5e_update_stats_work(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
update_stats_work);
mutex_lock(&priv->state_lock);
if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
- mlx5e_update_stats(priv);
+ priv->profile->update_stats(priv);
queue_delayed_work(priv->wq, dwork,
msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
}
@@ -273,7 +270,7 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
{
struct mlx5e_priv *priv = vpriv;
- if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
+ if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
return;
switch (event) {
@@ -289,12 +286,12 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
- set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+ set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
}
static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
- clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+ clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
}
@@ -309,6 +306,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
struct mlx5_core_dev *mdev = priv->mdev;
void *rqc = param->rqc;
void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
+ u32 byte_count;
int wq_sz;
int err;
int i;
@@ -323,32 +321,64 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];
wq_sz = mlx5_wq_ll_get_size(&rq->wq);
- rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
- cpu_to_node(c->cpu));
- if (!rq->skb) {
- err = -ENOMEM;
- goto err_rq_wq_destroy;
- }
- rq->wqe_sz = (priv->params.lro_en) ? priv->params.lro_wqe_sz :
- MLX5E_SW2HW_MTU(priv->netdev->mtu);
- rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz + MLX5E_NET_IP_ALIGN);
+ switch (priv->params.rq_wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ rq->wqe_info = kzalloc_node(wq_sz * sizeof(*rq->wqe_info),
+ GFP_KERNEL, cpu_to_node(c->cpu));
+ if (!rq->wqe_info) {
+ err = -ENOMEM;
+ goto err_rq_wq_destroy;
+ }
+ rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
+ rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
+ rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
+
+ rq->mpwqe_mtt_offset = c->ix *
+ MLX5E_REQUIRED_MTTS(1, BIT(priv->params.log_rq_size));
+
+ rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
+ rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
+ rq->wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
+ byte_count = rq->wqe_sz;
+ break;
+ default: /* MLX5_WQ_TYPE_LINKED_LIST */
+ rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
+ cpu_to_node(c->cpu));
+ if (!rq->skb) {
+ err = -ENOMEM;
+ goto err_rq_wq_destroy;
+ }
+ rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
+ rq->alloc_wqe = mlx5e_alloc_rx_wqe;
+ rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
+
+ rq->wqe_sz = (priv->params.lro_en) ?
+ priv->params.lro_wqe_sz :
+ MLX5E_SW2HW_MTU(priv->netdev->mtu);
+ rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz);
+ byte_count = rq->wqe_sz;
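+ /* Request HW start padding on linked-list RQ buffers */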
+ byte_count |= MLX5_HW_START_PADDING;
+ }
for (i = 0; i < wq_sz; i++) {
struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
- u32 byte_count = rq->wqe_sz - MLX5E_NET_IP_ALIGN;
- wqe->data.lkey = c->mkey_be;
- wqe->data.byte_count =
- cpu_to_be32(byte_count | MLX5_HW_START_PADDING);
+ wqe->data.byte_count = cpu_to_be32(byte_count);
}
+ INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
+ rq->am.mode = priv->params.rx_cq_period_mode;
+
+ rq->wq_type = priv->params.rq_wq_type;
rq->pdev = c->pdev;
rq->netdev = c->netdev;
rq->tstamp = &priv->tstamp;
rq->channel = c;
rq->ix = c->ix;
rq->priv = c->priv;
+ rq->mkey_be = c->mkey_be;
+ rq->umr_mkey_be = cpu_to_be32(c->priv->umr_mkey.key);
return 0;
@@ -360,7 +390,14 @@ err_rq_wq_destroy:
static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
- kfree(rq->skb);
+ switch (rq->wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ kfree(rq->wqe_info);
+ break;
+ default: /* MLX5_WQ_TYPE_LINKED_LIST */
+ kfree(rq->skb);
+ }
+
mlx5_wq_destroy(&rq->wq_ctrl);
}
@@ -388,7 +425,7 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
- MLX5_SET(rqc, rqc, flush_in_error_en, 1);
+ MLX5_SET(rqc, rqc, vsd, priv->params.vlan_strip_disable);
MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
@@ -403,7 +440,8 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
return err;
}
-static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
+static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
+ int next_state)
{
struct mlx5e_channel *c = rq->channel;
struct mlx5e_priv *priv = c->priv;
@@ -431,6 +469,36 @@ static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
return err;
}
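+/* Toggle VLAN stripping via the vsd (VLAN Strip Disable) flag on a ready RQ */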
+static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
+{
+ struct mlx5e_channel *c = rq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ void *in;
+ void *rqc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+
+ MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
+ MLX5_SET64(modify_rq_in, in, modify_bitmask, MLX5_RQ_BITMASK_VSD);
+ MLX5_SET(rqc, rqc, vsd, vsd);
+ MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);
+
+ err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
+
+ kvfree(in);
+
+ return err;
+}
+
static void mlx5e_disable_rq(struct mlx5e_rq *rq)
{
mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn);
@@ -453,10 +521,33 @@ static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
return -ETIMEDOUT;
}
+static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
+{
+ struct mlx5_wq_ll *wq = &rq->wq;
+ struct mlx5e_rx_wqe *wqe;
+ __be16 wqe_ix_be;
+ u16 wqe_ix;
+
+ /* UMR WQE (if in progress) is always at wq->head */
+ if (test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
+ mlx5e_free_rx_fragmented_mpwqe(rq, &rq->wqe_info[wq->head]);
+
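+ /* Pop every outstanding WQE from the linked list and free its buffers */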
+ while (!mlx5_wq_ll_is_empty(wq)) {
+ wqe_ix_be = *wq->tail_next;
+ wqe_ix = be16_to_cpu(wqe_ix_be);
+ wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
+ rq->dealloc_wqe(rq, wqe_ix);
+ mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
+ &wqe->next.next_wqe_index);
+ }
+}
+
static int mlx5e_open_rq(struct mlx5e_channel *c,
struct mlx5e_rq_param *param,
struct mlx5e_rq *rq)
{
+ struct mlx5e_sq *sq = &c->icosq;
+ u16 pi = sq->pc & sq->wq.sz_m1;
int err;
err = mlx5e_create_rq(c, param, rq);
@@ -467,12 +558,16 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
if (err)
goto err_destroy_rq;
- err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
+ err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
if (err)
goto err_disable_rq;
- set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
- mlx5e_send_nop(&c->sq[0], true); /* trigger mlx5e_post_rx_wqes() */
+ if (param->am_enabled)
+ set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
+
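+ /* Record the NOP in the ICO SQ accounting before ringing the doorbell */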
+ sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP;
+ sq->ico_wqe_info[pi].num_wqebbs = 1;
+ mlx5e_send_nop(sq, true); /* trigger mlx5e_post_rx_wqes() */
return 0;
@@ -486,17 +581,12 @@ err_destroy_rq:
static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
- clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
+ set_bit(MLX5E_RQ_STATE_FLUSH, &rq->state);
napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
-
- mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
- while (!mlx5_wq_ll_is_empty(&rq->wq))
- msleep(20);
-
- /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
- napi_synchronize(&rq->channel->napi);
+ cancel_work_sync(&rq->am.work);
mlx5e_disable_rq(rq);
+ mlx5e_free_rx_descs(rq);
mlx5e_destroy_rq(rq);
}
@@ -538,10 +628,9 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
void *sqc = param->sqc;
void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
- int txq_ix;
int err;
- err = mlx5_alloc_map_uar(mdev, &sq->uar, true);
+ err = mlx5_alloc_map_uar(mdev, &sq->uar, !!MLX5_CAP_GEN(mdev, bf));
if (err)
return err;
@@ -561,13 +650,32 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
}
sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
sq->max_inline = param->max_inline;
+ sq->min_inline_mode =
+ MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5E_INLINE_MODE_VPORT_CONTEXT ?
+ param->min_inline_mode : 0;
err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
if (err)
goto err_sq_wq_destroy;
- txq_ix = c->ix + tc * priv->params.num_channels;
- sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
+ if (param->icosq) {
+ u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+
+ sq->ico_wqe_info = kzalloc_node(sizeof(*sq->ico_wqe_info) *
+ wq_sz,
+ GFP_KERNEL,
+ cpu_to_node(c->cpu));
+ if (!sq->ico_wqe_info) {
+ err = -ENOMEM;
+ goto err_free_sq_db;
+ }
+ } else {
+ int txq_ix;
+
+ txq_ix = c->ix + tc * priv->params.num_channels;
+ sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
+ priv->txq_to_sq_map[txq_ix] = sq;
+ }
sq->pdev = c->pdev;
sq->tstamp = &priv->tstamp;
@@ -576,10 +684,12 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
sq->tc = tc;
sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
sq->bf_budget = MLX5E_SQ_BF_BUDGET;
- priv->txq_to_sq_map[txq_ix] = sq;
return 0;
+err_free_sq_db:
+ mlx5e_free_sq_db(sq);
+
err_sq_wq_destroy:
mlx5_wq_destroy(&sq->wq_ctrl);
@@ -594,6 +704,7 @@ static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
struct mlx5e_channel *c = sq->channel;
struct mlx5e_priv *priv = c->priv;
+ kfree(sq->ico_wqe_info);
mlx5e_free_sq_db(sq);
mlx5_wq_destroy(&sq->wq_ctrl);
mlx5_unmap_free_uar(priv->mdev, &sq->uar);
@@ -622,10 +733,11 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
memcpy(sqc, param->sqc, sizeof(param->sqc));
- MLX5_SET(sqc, sqc, tis_num_0, priv->tisn[sq->tc]);
- MLX5_SET(sqc, sqc, cqn, c->sq[sq->tc].cq.mcq.cqn);
+ MLX5_SET(sqc, sqc, tis_num_0, param->icosq ? 0 : priv->tisn[sq->tc]);
+ MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
+ MLX5_SET(sqc, sqc, min_wqe_inline_mode, sq->min_inline_mode);
MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
- MLX5_SET(sqc, sqc, tis_lst_sz, 1);
+ MLX5_SET(sqc, sqc, tis_lst_sz, param->icosq ? 0 : 1);
MLX5_SET(sqc, sqc, flush_in_error_en, 1);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
@@ -644,7 +756,8 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
return err;
}
-static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
+static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state,
+ int next_state, bool update_rl, int rl_index)
{
struct mlx5e_channel *c = sq->channel;
struct mlx5e_priv *priv = c->priv;
@@ -664,6 +777,10 @@ static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
MLX5_SET(modify_sq_in, in, sq_state, curr_state);
MLX5_SET(sqc, sqc, state, next_state);
+ if (update_rl && next_state == MLX5_SQC_STATE_RDY) {
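+ /* modify_bitmask bit 0 enables updating the rate limit index field */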
+ MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
+ MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);
+ }
err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);
@@ -679,6 +796,8 @@ static void mlx5e_disable_sq(struct mlx5e_sq *sq)
struct mlx5_core_dev *mdev = priv->mdev;
mlx5_core_destroy_sq(mdev, sq->sqn);
+ if (sq->rate_limit)
+ mlx5_rl_remove_rate(mdev, sq->rate_limit);
}
static int mlx5e_open_sq(struct mlx5e_channel *c,
@@ -696,13 +815,15 @@ static int mlx5e_open_sq(struct mlx5e_channel *c,
if (err)
goto err_destroy_sq;
- err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
+ err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY,
+ false, 0);
if (err)
goto err_disable_sq;
- set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
- netdev_tx_reset_queue(sq->txq);
- netif_tx_start_queue(sq->txq);
+ if (sq->txq) {
+ netdev_tx_reset_queue(sq->txq);
+ netif_tx_start_queue(sq->txq);
+ }
return 0;
@@ -723,22 +844,20 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq)
static void mlx5e_close_sq(struct mlx5e_sq *sq)
{
- clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
- napi_synchronize(&sq->channel->napi); /* prevent netif_tx_wake_queue */
- netif_tx_disable_queue(sq->txq);
-
- /* ensure hw is notified of all pending wqes */
- if (mlx5e_sq_has_room_for(sq, 1))
- mlx5e_send_nop(sq, true);
+ set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state);
+ /* prevent netif_tx_wake_queue */
+ napi_synchronize(&sq->channel->napi);
- mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
- while (sq->cc != sq->pc) /* wait till sq is empty */
- msleep(20);
+ if (sq->txq) {
+ netif_tx_disable_queue(sq->txq);
- /* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
- napi_synchronize(&sq->channel->napi);
+ /* last doorbell out, godspeed .. */
+ if (mlx5e_sq_has_room_for(sq, 1))
+ mlx5e_send_nop(sq, true);
+ }
mlx5e_disable_sq(sq);
+ mlx5e_free_tx_descs(sq);
mlx5e_destroy_sq(sq);
}
@@ -776,7 +895,7 @@ static int mlx5e_create_cq(struct mlx5e_channel *c,
mcq->comp = mlx5e_completion_event;
mcq->event = mlx5e_cq_error_event;
mcq->irqn = irqn;
- mcq->uar = &priv->cq_uar;
+ mcq->uar = &mdev->mlx5e_res.cq_uar;
for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
@@ -823,6 +942,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
+ MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
MLX5_SET(cqc, cqc, c_eqn, eqn);
MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
@@ -852,8 +972,7 @@ static void mlx5e_disable_cq(struct mlx5e_cq *cq)
static int mlx5e_open_cq(struct mlx5e_channel *c,
struct mlx5e_cq_param *param,
struct mlx5e_cq *cq,
- u16 moderation_usecs,
- u16 moderation_frames)
+ struct mlx5e_cq_moder moderation)
{
int err;
struct mlx5e_priv *priv = c->priv;
@@ -869,8 +988,8 @@ static int mlx5e_open_cq(struct mlx5e_channel *c,
if (MLX5_CAP_GEN(mdev, cq_moderation))
mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
- moderation_usecs,
- moderation_frames);
+ moderation.usec,
+ moderation.pkts);
return 0;
err_destroy_cq:
@@ -899,8 +1018,7 @@ static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
for (tc = 0; tc < c->num_tc; tc++) {
err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
- priv->params.tx_cq_moderation_usec,
- priv->params.tx_cq_moderation_pkts);
+ priv->params.tx_cq_moderation);
if (err)
goto err_close_tx_cqs;
}
@@ -955,19 +1073,96 @@ static void mlx5e_build_channeltc_to_txq_map(struct mlx5e_priv *priv, int ix)
{
int i;
- for (i = 0; i < MLX5E_MAX_NUM_TC; i++)
+ for (i = 0; i < priv->profile->max_tc; i++)
priv->channeltc_to_txq_map[ix][i] =
ix + i * priv->params.num_channels;
}
+static int mlx5e_set_sq_maxrate(struct net_device *dev,
+ struct mlx5e_sq *sq, u32 rate)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u16 rl_index = 0;
+ int err;
+
+ if (rate == sq->rate_limit)
+ /* nothing to do */
+ return 0;
+
+ if (sq->rate_limit)
+ /* remove the current rl index to free space for the next ones */
+ mlx5_rl_remove_rate(mdev, sq->rate_limit);
+
+ sq->rate_limit = 0;
+
+ if (rate) {
+ err = mlx5_rl_add_rate(mdev, rate, &rl_index);
+ if (err) {
+ netdev_err(dev, "Failed configuring rate %u: %d\n",
+ rate, err);
+ return err;
+ }
+ }
+
+ err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
+ MLX5_SQC_STATE_RDY, true, rl_index);
+ if (err) {
+ netdev_err(dev, "Failed configuring rate %u: %d\n",
+ rate, err);
+ /* remove the rate from the table */
+ if (rate)
+ mlx5_rl_remove_rate(mdev, rate);
+ return err;
+ }
+
+ sq->rate_limit = rate;
+ return 0;
+}
+
+static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_sq *sq = priv->txq_to_sq_map[index];
+ int err = 0;
+
+ if (!mlx5_rl_is_supported(mdev)) {
+ netdev_err(dev, "Rate limiting is not supported on this device\n");
+ return -EINVAL;
+ }
+
+ /* rate is given in Mb/sec, HW config is in Kb/sec */
+ rate = rate << 10;
+
+ /* Check whether the rate is in the valid range; 0 is always valid */
+ if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
+ netdev_err(dev, "TX rate %u, is not in range\n", rate);
+ return -ERANGE;
+ }
+
+ mutex_lock(&priv->state_lock);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ err = mlx5e_set_sq_maxrate(dev, sq, rate);
+ if (!err)
+ priv->tx_rates[index] = rate;
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_channel_param *cparam,
struct mlx5e_channel **cp)
{
+ struct mlx5e_cq_moder icosq_cq_moder = {0, 0};
struct net_device *netdev = priv->netdev;
+ struct mlx5e_cq_moder rx_cq_profile;
int cpu = mlx5e_get_cpu(priv, ix);
struct mlx5e_channel *c;
+ struct mlx5e_sq *sq;
int err;
+ int i;
c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
if (!c)
@@ -978,29 +1173,51 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->cpu = cpu;
c->pdev = &priv->mdev->pdev->dev;
c->netdev = priv->netdev;
- c->mkey_be = cpu_to_be32(priv->mkey.key);
+ c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
c->num_tc = priv->params.num_tc;
+ if (priv->params.rx_am_enabled)
+ rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode);
+ else
+ rx_cq_profile = priv->params.rx_cq_moderation;
+
mlx5e_build_channeltc_to_txq_map(priv, ix);
netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
- err = mlx5e_open_tx_cqs(c, cparam);
+ err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, icosq_cq_moder);
if (err)
goto err_napi_del;
+ err = mlx5e_open_tx_cqs(c, cparam);
+ if (err)
+ goto err_close_icosq_cq;
+
err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
- priv->params.rx_cq_moderation_usec,
- priv->params.rx_cq_moderation_pkts);
+ rx_cq_profile);
if (err)
goto err_close_tx_cqs;
napi_enable(&c->napi);
- err = mlx5e_open_sqs(c, cparam);
+ err = mlx5e_open_sq(c, 0, &cparam->icosq, &c->icosq);
if (err)
goto err_disable_napi;
+ err = mlx5e_open_sqs(c, cparam);
+ if (err)
+ goto err_close_icosq;
+
+ for (i = 0; i < priv->params.num_tc; i++) {
+ u32 txq_ix = priv->channeltc_to_txq_map[ix][i];
+
+ if (priv->tx_rates[txq_ix]) {
+ sq = priv->txq_to_sq_map[txq_ix];
+ mlx5e_set_sq_maxrate(priv->netdev, sq,
+ priv->tx_rates[txq_ix]);
+ }
+ }
+
err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
if (err)
goto err_close_sqs;
@@ -1013,6 +1230,9 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
err_close_sqs:
mlx5e_close_sqs(c);
+err_close_icosq:
+ mlx5e_close_sq(&c->icosq);
+
err_disable_napi:
napi_disable(&c->napi);
mlx5e_close_cq(&c->rq.cq);
@@ -1020,6 +1240,9 @@ err_disable_napi:
err_close_tx_cqs:
mlx5e_close_tx_cqs(c);
+err_close_icosq_cq:
+ mlx5e_close_cq(&c->icosq.cq);
+
err_napi_del:
netif_napi_del(&c->napi);
napi_hash_del(&c->napi);
@@ -1032,9 +1255,11 @@ static void mlx5e_close_channel(struct mlx5e_channel *c)
{
mlx5e_close_rq(&c->rq);
mlx5e_close_sqs(c);
+ mlx5e_close_sq(&c->icosq);
napi_disable(&c->napi);
mlx5e_close_cq(&c->rq.cq);
mlx5e_close_tx_cqs(c);
+ mlx5e_close_cq(&c->icosq.cq);
netif_napi_del(&c->napi);
napi_hash_del(&c->napi);
@@ -1049,14 +1274,28 @@ static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
void *rqc = param->rqc;
void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
- MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
+ switch (priv->params.rq_wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ MLX5_SET(wq, wq, log_wqe_num_of_strides,
+ priv->params.mpwqe_log_num_strides - 9);
+ MLX5_SET(wq, wq, log_wqe_stride_size,
+ priv->params.mpwqe_log_stride_sz - 6);
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
+ break;
+ default: /* MLX5_WQ_TYPE_LINKED_LIST */
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
+ }
+
MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
- MLX5_SET(wq, wq, pd, priv->pdn);
+ MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
+ MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
param->wq.linear = 1;
+
+ param->am_enabled = priv->params.rx_am_enabled;
}
static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
@@ -1068,18 +1307,29 @@ static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
}
+static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
+ struct mlx5e_sq_param *param)
+{
+ void *sqc = param->sqc;
+ void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
+ MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
+
+ param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
+}
+
static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
struct mlx5e_sq_param *param)
{
void *sqc = param->sqc;
void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+ mlx5e_build_sq_param_common(priv, param);
MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
- MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
- MLX5_SET(wq, wq, pd, priv->pdn);
- param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
param->max_inline = priv->params.tx_max_inline;
+ param->min_inline_mode = priv->params.tx_min_inline_mode;
}
static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
@@ -1087,17 +1337,33 @@ static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
{
void *cqc = param->cqc;
- MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index);
+ MLX5_SET(cqc, cqc, uar_page, priv->mdev->mlx5e_res.cq_uar.index);
}
static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
struct mlx5e_cq_param *param)
{
void *cqc = param->cqc;
+ u8 log_cq_size;
- MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size);
+ switch (priv->params.rq_wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ log_cq_size = priv->params.log_rq_size +
+ priv->params.mpwqe_log_num_strides;
+ break;
+ default: /* MLX5_WQ_TYPE_LINKED_LIST */
+ log_cq_size = priv->params.log_rq_size;
+ }
+
+ MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
+ if (priv->params.rx_cqe_compress) {
+ MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
+ MLX5_SET(cqc, cqc, cqe_comp_en, 1);
+ }
mlx5e_build_common_cq_param(priv, param);
+
+ param->cq_period_mode = priv->params.rx_cq_period_mode;
}
static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
@@ -1105,25 +1371,56 @@ static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
{
void *cqc = param->cqc;
- MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
+ MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
mlx5e_build_common_cq_param(priv, param);
+
+ param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+}
+
+static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
+ struct mlx5e_cq_param *param,
+ u8 log_wq_size)
+{
+ void *cqc = param->cqc;
+
+ MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);
+
+ mlx5e_build_common_cq_param(priv, param);
+
+ param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
}
-static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
- struct mlx5e_channel_param *cparam)
+static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
+ struct mlx5e_sq_param *param,
+ u8 log_wq_size)
{
- memset(cparam, 0, sizeof(*cparam));
+ void *sqc = param->sqc;
+ void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ mlx5e_build_sq_param_common(priv, param);
+
+ MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
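+ /* Allow UMR registration WQEs on this SQ when the device supports them */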
+ MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
+
+ param->icosq = true;
+}
+
+static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
+ struct mlx5e_channel_param *cparam)
+{
+ u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
mlx5e_build_rq_param(priv, &cparam->rq);
mlx5e_build_sq_param(priv, &cparam->sq);
+ mlx5e_build_icosq_param(priv, &cparam->icosq, icosq_log_wq_sz);
mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
+ mlx5e_build_ico_cq_param(priv, &cparam->icosq_cq, icosq_log_wq_sz);
}
static int mlx5e_open_channels(struct mlx5e_priv *priv)
{
- struct mlx5e_channel_param cparam;
+ struct mlx5e_channel_param *cparam;
int nch = priv->params.num_channels;
int err = -ENOMEM;
int i;
@@ -1135,12 +1432,15 @@ static int mlx5e_open_channels(struct mlx5e_priv *priv)
priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc,
sizeof(struct mlx5e_sq *), GFP_KERNEL);
- if (!priv->channel || !priv->txq_to_sq_map)
+ cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
+
+ if (!priv->channel || !priv->txq_to_sq_map || !cparam)
goto err_free_txq_to_sq_map;
- mlx5e_build_channel_param(priv, &cparam);
+ mlx5e_build_channel_param(priv, cparam);
+
for (i = 0; i < nch; i++) {
- err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
+ err = mlx5e_open_channel(priv, i, cparam, &priv->channel[i]);
if (err)
goto err_close_channels;
}
@@ -1151,6 +1451,12 @@ static int mlx5e_open_channels(struct mlx5e_priv *priv)
goto err_close_channels;
}
+ /* FIXME: This is a W/A for a TX timeout watchdog false alarm when
+ * polling for inactive TX queues.
+ */
+ netif_tx_start_all_queues(priv->netdev);
+
+ kfree(cparam);
return 0;
err_close_channels:
@@ -1160,6 +1466,7 @@ err_close_channels:
err_free_txq_to_sq_map:
kfree(priv->txq_to_sq_map);
kfree(priv->channel);
+ kfree(cparam);
return err;
}
@@ -1168,6 +1475,12 @@ static void mlx5e_close_channels(struct mlx5e_priv *priv)
{
int i;
+ /* FIXME: This is a W/A only for a TX timeout watchdog false alarm when
+ * polling for inactive TX queues.
+ */
+ netif_tx_stop_all_queues(priv->netdev);
+ netif_tx_disable(priv->netdev);
+
for (i = 0; i < priv->params.num_channels; i++)
mlx5e_close_channel(priv->channel[i]);
@@ -1199,48 +1512,37 @@ static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
int ix = i;
+ u32 rqn;
if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);
ix = priv->params.indirection_rqt[ix];
- MLX5_SET(rqtc, rqtc, rq_num[i],
- test_bit(MLX5E_STATE_OPENED, &priv->state) ?
- priv->channel[ix]->rq.rqn :
- priv->drop_rq.rqn);
+ rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
+ priv->channel[ix]->rq.rqn :
+ priv->drop_rq.rqn;
+ MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
}
}
-static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, void *rqtc,
- enum mlx5e_rqt_ix rqt_ix)
+static void mlx5e_fill_direct_rqt_rqn(struct mlx5e_priv *priv, void *rqtc,
+ int ix)
{
+ u32 rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
+ priv->channel[ix]->rq.rqn :
+ priv->drop_rq.rqn;
- switch (rqt_ix) {
- case MLX5E_INDIRECTION_RQT:
- mlx5e_fill_indir_rqt_rqns(priv, rqtc);
-
- break;
-
- default: /* MLX5E_SINGLE_RQ_RQT */
- MLX5_SET(rqtc, rqtc, rq_num[0],
- test_bit(MLX5E_STATE_OPENED, &priv->state) ?
- priv->channel[0]->rq.rqn :
- priv->drop_rq.rqn);
-
- break;
- }
+ MLX5_SET(rqtc, rqtc, rq_num[0], rqn);
}
-static int mlx5e_create_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
+static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz,
+ int ix, struct mlx5e_rqt *rqt)
{
struct mlx5_core_dev *mdev = priv->mdev;
- u32 *in;
void *rqtc;
int inlen;
- int sz;
int err;
-
- sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;
+ u32 *in;
inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
in = mlx5_vzalloc(inlen);
@@ -1252,26 +1554,62 @@ static int mlx5e_create_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
- mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);
+ if (sz > 1) /* RSS */
+ mlx5e_fill_indir_rqt_rqns(priv, rqtc);
+ else
+ mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);
- err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn[rqt_ix]);
+ err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn);
+ if (!err)
+ rqt->enabled = true;
kvfree(in);
+ return err;
+}
+
+void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt)
+{
+ rqt->enabled = false;
+ mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn);
+}
+
+static int mlx5e_create_indirect_rqts(struct mlx5e_priv *priv)
+{
+ struct mlx5e_rqt *rqt = &priv->indir_rqt;
+
+ return mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, 0, rqt);
+}
+
+int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
+{
+ struct mlx5e_rqt *rqt;
+ int err;
+ int ix;
+
+ for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
+ rqt = &priv->direct_tir[ix].rqt;
+ err = mlx5e_create_rqt(priv, 1 /* size */, ix, rqt);
+ if (err)
+ goto err_destroy_rqts;
+ }
+
+ return 0;
+
+err_destroy_rqts:
+ for (ix--; ix >= 0; ix--)
+ mlx5e_destroy_rqt(priv, &priv->direct_tir[ix].rqt);
return err;
}
-int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
+int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix)
{
struct mlx5_core_dev *mdev = priv->mdev;
- u32 *in;
void *rqtc;
int inlen;
- int sz;
+ u32 *in;
int err;
- sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;
-
inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
in = mlx5_vzalloc(inlen);
if (!in)
@@ -1280,27 +1618,36 @@ int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
-
- mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);
+ if (sz > 1) /* RSS */
+ mlx5e_fill_indir_rqt_rqns(priv, rqtc);
+ else
+ mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);
MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
- err = mlx5_core_modify_rqt(mdev, priv->rqtn[rqt_ix], in, inlen);
+ err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);
kvfree(in);
return err;
}
-static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
-{
- mlx5_core_destroy_rqt(priv->mdev, priv->rqtn[rqt_ix]);
-}
-
static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
{
- mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
- mlx5e_redirect_rqt(priv, MLX5E_SINGLE_RQ_RQT);
+ u32 rqtn;
+ int ix;
+
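+ /* Repoint the indirection RQT and every enabled per-channel direct RQT */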
+ if (priv->indir_rqt.enabled) {
+ rqtn = priv->indir_rqt.rqtn;
+ mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
+ }
+
+ for (ix = 0; ix < priv->params.num_channels; ix++) {
+ if (!priv->direct_tir[ix].rqt.enabled)
+ continue;
+ rqtn = priv->direct_tir[ix].rqt.rqtn;
+ mlx5e_redirect_rqt(priv, rqtn, 1, ix);
+ }
}
static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
@@ -1345,6 +1692,7 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
int inlen;
int err;
int tt;
+ int ix;
inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
in = mlx5_vzalloc(inlen);
@@ -1356,53 +1704,26 @@ static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
mlx5e_build_tir_ctx_lro(tirc, priv);
- for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
- err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen);
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+ err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
+ inlen);
if (err)
- break;
+ goto free_in;
}
- kvfree(in);
-
- return err;
-}
-
-static int mlx5e_refresh_tir_self_loopback_enable(struct mlx5_core_dev *mdev,
- u32 tirn)
-{
- void *in;
- int inlen;
- int err;
-
- inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
- in = mlx5_vzalloc(inlen);
- if (!in)
- return -ENOMEM;
-
- MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
-
- err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
+ for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
+ err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
+ in, inlen);
+ if (err)
+ goto free_in;
+ }
+free_in:
kvfree(in);
return err;
}
-static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
-{
- int err;
- int i;
-
- for (i = 0; i < MLX5E_NUM_TT; i++) {
- err = mlx5e_refresh_tir_self_loopback_enable(priv->mdev,
- priv->tirn[i]);
- if (err)
- return err;
- }
-
- return 0;
-}
-
static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
{
struct mlx5_core_dev *mdev = priv->mdev;
@@ -1464,13 +1785,17 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev)
netdev_set_num_tc(netdev, ntc);
+ /* Map netdev TCs to offset 0.
+ * We have our own UP (user priority) to TXQ mapping for QoS.
+ */
for (tc = 0; tc < ntc; tc++)
- netdev_set_tc_queue(netdev, tc, nch, tc * nch);
+ netdev_set_tc_queue(netdev, tc, nch, 0);
}
int mlx5e_open_locked(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
int num_txqs;
int err;
@@ -1482,10 +1807,6 @@ int mlx5e_open_locked(struct net_device *netdev)
netif_set_real_num_tx_queues(netdev, num_txqs);
netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
- err = mlx5e_set_dev_port_mtu(netdev);
- if (err)
- goto err_clear_state_opened_flag;
-
err = mlx5e_open_channels(priv);
if (err) {
netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
@@ -1493,7 +1814,7 @@ int mlx5e_open_locked(struct net_device *netdev)
goto err_clear_state_opened_flag;
}
- err = mlx5e_refresh_tirs_self_loopback_enable(priv);
+ err = mlx5e_refresh_tirs_self_loopback_enable(priv->mdev);
if (err) {
netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n",
__func__, err);
@@ -1503,9 +1824,17 @@ int mlx5e_open_locked(struct net_device *netdev)
mlx5e_redirect_rqts(priv);
mlx5e_update_carrier(priv);
mlx5e_timestamp_init(priv);
+#ifdef CONFIG_RFS_ACCEL
+ priv->netdev->rx_cpu_rmap = priv->mdev->rmap;
+#endif
+ if (priv->profile->update_stats)
+ queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
- queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
-
+ if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
+ err = mlx5e_add_sqs_fwd_rules(priv);
+ if (err)
+ goto err_close_channels;
+ }
return 0;
err_close_channels:
@@ -1515,7 +1844,7 @@ err_clear_state_opened_flag:
return err;
}
-static int mlx5e_open(struct net_device *netdev)
+int mlx5e_open(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
@@ -1530,6 +1859,7 @@ static int mlx5e_open(struct net_device *netdev)
int mlx5e_close_locked(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
/* May already be CLOSED in case a previous configuration operation
* (e.g RX/TX queue size change) that involves close&open failed.
@@ -1539,6 +1869,9 @@ int mlx5e_close_locked(struct net_device *netdev)
clear_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (MLX5_CAP_GEN(mdev, vport_group_manager))
+ mlx5e_remove_sqs_fwd_rules(priv);
+
mlx5e_timestamp_cleanup(priv);
netif_carrier_off(priv->netdev);
mlx5e_redirect_rqts(priv);
@@ -1547,7 +1880,7 @@ int mlx5e_close_locked(struct net_device *netdev)
return 0;
}
-static int mlx5e_close(struct net_device *netdev)
+int mlx5e_close(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
@@ -1606,7 +1939,7 @@ static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
mcq->comp = mlx5e_completion_event;
mcq->event = mlx5e_cq_error_event;
mcq->irqn = irqn;
- mcq->uar = &priv->cq_uar;
+ mcq->uar = &mdev->mlx5e_res.cq_uar;
cq->priv = priv;
@@ -1672,7 +2005,7 @@ static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
memset(in, 0, sizeof(in));
MLX5_SET(tisc, tisc, prio, tc << 1);
- MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
+ MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);
return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
}
@@ -1682,12 +2015,12 @@ static void mlx5e_destroy_tis(struct mlx5e_priv *priv, int tc)
mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
}
-static int mlx5e_create_tises(struct mlx5e_priv *priv)
+int mlx5e_create_tises(struct mlx5e_priv *priv)
{
int err;
int tc;
- for (tc = 0; tc < MLX5E_MAX_NUM_TC; tc++) {
+ for (tc = 0; tc < priv->profile->max_tc; tc++) {
err = mlx5e_create_tis(priv, tc);
if (err)
goto err_close_tises;
@@ -1702,19 +2035,20 @@ err_close_tises:
return err;
}
-static void mlx5e_destroy_tises(struct mlx5e_priv *priv)
+void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
{
int tc;
- for (tc = 0; tc < MLX5E_MAX_NUM_TC; tc++)
+ for (tc = 0; tc < priv->profile->max_tc; tc++)
mlx5e_destroy_tis(priv, tc);
}
-static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
+static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
+ enum mlx5e_traffic_types tt)
{
void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
- MLX5_SET(tirc, tirc, transport_domain, priv->tdn);
+ MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
MLX5_HASH_FIELD_SEL_DST_IP)
@@ -1731,19 +2065,8 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
mlx5e_build_tir_ctx_lro(tirc, priv);
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
-
- switch (tt) {
- case MLX5E_TT_ANY:
- MLX5_SET(tirc, tirc, indirect_table,
- priv->rqtn[MLX5E_SINGLE_RQ_RQT]);
- MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
- break;
- default:
- MLX5_SET(tirc, tirc, indirect_table,
- priv->rqtn[MLX5E_INDIRECTION_RQT]);
- mlx5e_build_tir_ctx_hash(tirc, priv);
- break;
- }
+ MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
+ mlx5e_build_tir_ctx_hash(tirc, priv);
switch (tt) {
case MLX5E_TT_IPV4_TCP:
@@ -1823,64 +2146,132 @@ static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
MLX5_SET(rx_hash_field_select, hfso, selected_fields,
MLX5_HASH_IP);
break;
+ default:
+ WARN_ONCE(true,
+ "mlx5e_build_indir_tir_ctx: bad traffic type!\n");
}
}
-static int mlx5e_create_tir(struct mlx5e_priv *priv, int tt)
+static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
+ u32 rqtn)
{
- struct mlx5_core_dev *mdev = priv->mdev;
- u32 *in;
+ MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
+
+ mlx5e_build_tir_ctx_lro(tirc, priv);
+
+ MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
+ MLX5_SET(tirc, tirc, indirect_table, rqtn);
+ MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
+}
+
+static int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
+{
+ struct mlx5e_tir *tir;
void *tirc;
int inlen;
int err;
+ u32 *in;
+ int tt;
inlen = MLX5_ST_SZ_BYTES(create_tir_in);
in = mlx5_vzalloc(inlen);
if (!in)
return -ENOMEM;
- tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+ for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
+ memset(in, 0, inlen);
+ tir = &priv->indir_tir[tt];
+ tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+ mlx5e_build_indir_tir_ctx(priv, tirc, tt);
+ err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
+ if (err)
+ goto err_destroy_tirs;
+ }
- mlx5e_build_tir_ctx(priv, tirc, tt);
+ kvfree(in);
+
+ return 0;
- err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
+err_destroy_tirs:
+ for (tt--; tt >= 0; tt--)
+ mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);
kvfree(in);
return err;
}
-static void mlx5e_destroy_tir(struct mlx5e_priv *priv, int tt)
-{
- mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
-}
-
-static int mlx5e_create_tirs(struct mlx5e_priv *priv)
+int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
{
+ int nch = priv->profile->max_nch(priv->mdev);
+ struct mlx5e_tir *tir;
+ void *tirc;
+ int inlen;
int err;
- int i;
+ u32 *in;
+ int ix;
+
+ inlen = MLX5_ST_SZ_BYTES(create_tir_in);
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
- for (i = 0; i < MLX5E_NUM_TT; i++) {
- err = mlx5e_create_tir(priv, i);
+ for (ix = 0; ix < nch; ix++) {
+ memset(in, 0, inlen);
+ tir = &priv->direct_tir[ix];
+ tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+ mlx5e_build_direct_tir_ctx(priv, tirc,
+ priv->direct_tir[ix].rqt.rqtn);
+ err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
if (err)
- goto err_destroy_tirs;
+ goto err_destroy_ch_tirs;
}
+ kvfree(in);
+
return 0;
-err_destroy_tirs:
- for (i--; i >= 0; i--)
- mlx5e_destroy_tir(priv, i);
+err_destroy_ch_tirs:
+ for (ix--; ix >= 0; ix--)
+ mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[ix]);
+
+ kvfree(in);
return err;
}
-static void mlx5e_destroy_tirs(struct mlx5e_priv *priv)
+static void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
+ mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);
+}
+
+void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
{
+ int nch = priv->profile->max_nch(priv->mdev);
int i;
- for (i = 0; i < MLX5E_NUM_TT; i++)
- mlx5e_destroy_tir(priv, i);
+ for (i = 0; i < nch; i++)
+ mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[i]);
+}
+
+int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd)
+{
+ int err = 0;
+ int i;
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return 0;
+
+ for (i = 0; i < priv->params.num_channels; i++) {
+ err = mlx5e_modify_rq_vsd(&priv->channel[i]->rq, vsd);
+ if (err)
+ return err;
+ }
+
+ return 0;
}
static int mlx5e_setup_tc(struct net_device *netdev, u8 tc)
@@ -1923,6 +2314,8 @@ static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle,
return mlx5e_configure_flower(priv, proto, tc->cls_flower);
case TC_CLSFLOWER_DESTROY:
return mlx5e_delete_flower(priv, tc->cls_flower);
+ case TC_CLSFLOWER_STATS:
+ return mlx5e_stats_flower(priv, tc->cls_flower);
}
default:
return -EOPNOTSUPP;
@@ -1935,23 +2328,41 @@ mqprio:
return mlx5e_setup_tc(dev, tc->tc);
}
-static struct rtnl_link_stats64 *
+struct rtnl_link_stats64 *
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_sw_stats *sstats = &priv->stats.sw;
struct mlx5e_vport_stats *vstats = &priv->stats.vport;
-
- stats->rx_packets = vstats->rx_packets;
- stats->rx_bytes = vstats->rx_bytes;
- stats->tx_packets = vstats->tx_packets;
- stats->tx_bytes = vstats->tx_bytes;
- stats->multicast = vstats->rx_multicast_packets +
- vstats->tx_multicast_packets;
- stats->tx_errors = vstats->tx_error_packets;
- stats->rx_errors = vstats->rx_error_packets;
- stats->tx_dropped = vstats->tx_queue_dropped;
- stats->rx_crc_errors = 0;
- stats->rx_length_errors = 0;
+ struct mlx5e_pport_stats *pstats = &priv->stats.pport;
+
+ stats->rx_packets = sstats->rx_packets;
+ stats->rx_bytes = sstats->rx_bytes;
+ stats->tx_packets = sstats->tx_packets;
+ stats->tx_bytes = sstats->tx_bytes;
+
+ stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
+ stats->tx_dropped = sstats->tx_queue_dropped;
+
+ stats->rx_length_errors =
+ PPORT_802_3_GET(pstats, a_in_range_length_errors) +
+ PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
+ PPORT_802_3_GET(pstats, a_frame_too_long_errors);
+ stats->rx_crc_errors =
+ PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
+ stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
+ stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
+ stats->tx_carrier_errors =
+ PPORT_802_3_GET(pstats, a_symbol_error_during_carrier);
+ stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
+ stats->rx_frame_errors;
+ stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;
+
+ /* vport multicast also counts packets that are dropped due to steering
+ * or rx out of buffer
+ */
+ stats->multicast =
+ VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
return stats;
}
@@ -1980,50 +2391,154 @@ static int mlx5e_set_mac(struct net_device *netdev, void *addr)
return 0;
}
-static int mlx5e_set_features(struct net_device *netdev,
- netdev_features_t features)
+#define MLX5E_SET_FEATURE(netdev, feature, enable) \
+ do { \
+ if (enable) \
+ netdev->features |= feature; \
+ else \
+ netdev->features &= ~feature; \
+ } while (0)
+
+typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
+
+static int set_feature_lro(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
- int err = 0;
- netdev_features_t changes = features ^ netdev->features;
+ bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ int err;
mutex_lock(&priv->state_lock);
- if (changes & NETIF_F_LRO) {
- bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
-
- if (was_opened)
- mlx5e_close_locked(priv->netdev);
-
- priv->params.lro_en = !!(features & NETIF_F_LRO);
- err = mlx5e_modify_tirs_lro(priv);
- if (err)
- mlx5_core_warn(priv->mdev, "lro modify failed, %d\n",
- err);
+ if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST))
+ mlx5e_close_locked(priv->netdev);
- if (was_opened)
- err = mlx5e_open_locked(priv->netdev);
+ priv->params.lro_en = enable;
+ err = mlx5e_modify_tirs_lro(priv);
+ if (err) {
+ netdev_err(netdev, "lro modify failed, %d\n", err);
+ priv->params.lro_en = !enable;
}
+ if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST))
+ mlx5e_open_locked(priv->netdev);
+
mutex_unlock(&priv->state_lock);
- if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) {
- if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
- mlx5e_enable_vlan_filter(priv);
- else
- mlx5e_disable_vlan_filter(priv);
- }
+ return err;
+}
+
+static int set_feature_vlan_filter(struct net_device *netdev, bool enable)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ if (enable)
+ mlx5e_enable_vlan_filter(priv);
+ else
+ mlx5e_disable_vlan_filter(priv);
+
+ return 0;
+}
+
+static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
- if ((changes & NETIF_F_HW_TC) && !(features & NETIF_F_HW_TC) &&
- mlx5e_tc_num_filters(priv)) {
+ if (!enable && mlx5e_tc_num_filters(priv)) {
netdev_err(netdev,
"Active offloaded tc filters, can't turn hw_tc_offload off\n");
return -EINVAL;
}
+ return 0;
+}
+
+static int set_feature_rx_all(struct net_device *netdev, bool enable)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ return mlx5_set_port_fcs(mdev, !enable);
+}
+
+static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err;
+
+ mutex_lock(&priv->state_lock);
+
+ priv->params.vlan_strip_disable = !enable;
+ err = mlx5e_modify_rqs_vsd(priv, !enable);
+ if (err)
+ priv->params.vlan_strip_disable = enable;
+
+ mutex_unlock(&priv->state_lock);
+
return err;
}
+#ifdef CONFIG_RFS_ACCEL
+static int set_feature_arfs(struct net_device *netdev, bool enable)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err;
+
+ if (enable)
+ err = mlx5e_arfs_enable(priv);
+ else
+ err = mlx5e_arfs_disable(priv);
+
+ return err;
+}
+#endif
+
+static int mlx5e_handle_feature(struct net_device *netdev,
+ netdev_features_t wanted_features,
+ netdev_features_t feature,
+ mlx5e_feature_handler feature_handler)
+{
+ netdev_features_t changes = wanted_features ^ netdev->features;
+ bool enable = !!(wanted_features & feature);
+ int err;
+
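+ /* only invoke the handler when the requested state differs from the
+ * current netdev->features state
+ */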
+ if (!(changes & feature))
+ return 0;
+
+ err = feature_handler(netdev, enable);
+ if (err) {
+ netdev_err(netdev, "%s feature 0x%llx failed err %d\n",
+ enable ? "Enable" : "Disable", feature, err);
+ return err;
+ }
+
+ MLX5E_SET_FEATURE(netdev, feature, enable);
+ return 0;
+}
+
+static int mlx5e_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ int err;
+
+ err = mlx5e_handle_feature(netdev, features, NETIF_F_LRO,
+ set_feature_lro);
+ err |= mlx5e_handle_feature(netdev, features,
+ NETIF_F_HW_VLAN_CTAG_FILTER,
+ set_feature_vlan_filter);
+ err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC,
+ set_feature_tc_num_filters);
+ err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL,
+ set_feature_rx_all);
+ err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX,
+ set_feature_rx_vlan);
+#ifdef CONFIG_RFS_ACCEL
+ err |= mlx5e_handle_feature(netdev, features, NETIF_F_NTUPLE,
+ set_feature_arfs);
+#endif
+
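+ /* the handlers run independently so one failure does not block the
+ * rest; any error is collapsed into a single -EINVAL
+ */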
+ return err ? -EINVAL : 0;
+}
+
#define MXL5_HW_MIN_MTU 64
#define MXL5E_MIN_MTU (MXL5_HW_MIN_MTU + ETH_FCS_LEN)
@@ -2035,6 +2550,7 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
u16 max_mtu;
u16 min_mtu;
int err = 0;
+ bool reset;
mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
@@ -2050,13 +2566,18 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
mutex_lock(&priv->state_lock);
+ reset = !priv->params.lro_en &&
+ (priv->params.rq_wq_type !=
+ MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
+
was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- if (was_opened)
+ if (was_opened && reset)
mlx5e_close_locked(netdev);
netdev->mtu = new_mtu;
+ mlx5e_set_dev_port_mtu(netdev);
- if (was_opened)
+ if (was_opened && reset)
err = mlx5e_open_locked(netdev);
mutex_unlock(&priv->state_lock);
@@ -2093,6 +2614,21 @@ static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
vlan, qos);
}
+static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
+}
+
+static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
+}
static int mlx5_vport_link2ifla(u8 esw_link)
{
switch (esw_link) {
@@ -2149,27 +2685,32 @@ static int mlx5e_get_vf_stats(struct net_device *dev,
vf_stats);
}
-#if IS_ENABLED(CONFIG_MLX5_CORE_EN_VXLAN)
static void mlx5e_add_vxlan_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
+ struct udp_tunnel_info *ti)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
+
if (!mlx5e_vxlan_allowed(priv->mdev))
return;
- mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 1);
+ mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1);
}
static void mlx5e_del_vxlan_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
+ struct udp_tunnel_info *ti)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
+
if (!mlx5e_vxlan_allowed(priv->mdev))
return;
- mlx5e_vxlan_queue_work(priv, sa_family, be16_to_cpu(port), 0);
+ mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0);
}
static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
@@ -2221,7 +2762,29 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
return features;
}
-#endif
+
+static void mlx5e_tx_timeout(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ bool sched_work = false;
+ int i;
+
+ netdev_err(dev, "TX timeout detected\n");
+
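+ /* mark every stopped SQ for flush and let tx_timeout_work recover
+ * the affected queues
+ */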
+ for (i = 0; i < priv->params.num_channels * priv->params.num_tc; i++) {
+ struct mlx5e_sq *sq = priv->txq_to_sq_map[i];
+
+ if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
+ continue;
+ sched_work = true;
+ set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state);
+ netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
+ i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
+ }
+
+ if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state))
+ schedule_work(&priv->tx_timeout_work);
+}
static const struct net_device_ops mlx5e_netdev_ops_basic = {
.ndo_open = mlx5e_open,
@@ -2237,6 +2800,11 @@ static const struct net_device_ops mlx5e_netdev_ops_basic = {
.ndo_set_features = mlx5e_set_features,
.ndo_change_mtu = mlx5e_change_mtu,
.ndo_do_ioctl = mlx5e_ioctl,
+ .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = mlx5e_rx_flow_steer,
+#endif
+ .ndo_tx_timeout = mlx5e_tx_timeout,
};
static const struct net_device_ops mlx5e_netdev_ops_sriov = {
@@ -2253,16 +2821,21 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
.ndo_set_features = mlx5e_set_features,
.ndo_change_mtu = mlx5e_change_mtu,
.ndo_do_ioctl = mlx5e_ioctl,
-#ifdef CONFIG_MLX5_CORE_EN_VXLAN
- .ndo_add_vxlan_port = mlx5e_add_vxlan_port,
- .ndo_del_vxlan_port = mlx5e_del_vxlan_port,
+ .ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
+ .ndo_udp_tunnel_del = mlx5e_del_vxlan_port,
+ .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
.ndo_features_check = mlx5e_features_check,
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = mlx5e_rx_flow_steer,
#endif
.ndo_set_vf_mac = mlx5e_set_vf_mac,
.ndo_set_vf_vlan = mlx5e_set_vf_vlan,
+ .ndo_set_vf_spoofchk = mlx5e_set_vf_spoofchk,
+ .ndo_set_vf_trust = mlx5e_set_vf_trust,
.ndo_get_vf_config = mlx5e_get_vf_config,
.ndo_set_vf_link_state = mlx5e_set_vf_link_state,
.ndo_get_vf_stats = mlx5e_get_vf_stats,
+ .ndo_tx_timeout = mlx5e_tx_timeout,
};
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
@@ -2317,51 +2890,187 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv)
}
#endif
-void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
+void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
+ u32 *indirection_rqt, int len,
int num_channels)
{
+ int node = mdev->priv.numa_node;
+ int node_num_of_cores;
int i;
+ if (node == -1)
+ node = first_online_node;
+
+ node_num_of_cores = cpumask_weight(cpumask_of_node(node));
+
+ if (node_num_of_cores)
+ num_channels = min_t(int, num_channels, node_num_of_cores);
+
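+ /* spread the indirection table round-robin over the channels, capped
+ * to the core count of the device's NUMA node
+ */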
for (i = 0; i < len; i++)
indirection_rqt[i] = i % num_channels;
}
-static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- int num_channels)
+static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
+{
+ return MLX5_CAP_GEN(mdev, striding_rq) &&
+ MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
+ MLX5_CAP_ETH(mdev, reg_umr_sq);
+}
+
+static int mlx5e_get_pci_bw(struct mlx5_core_dev *mdev, u32 *pci_bw)
+{
+ enum pcie_link_width width;
+ enum pci_bus_speed speed;
+ int err = 0;
+
+ err = pcie_get_minimum_link(mdev->pdev, &speed, &width);
+ if (err)
+ return err;
+
+ if (speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
+ return -EINVAL;
+
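+ /* total bandwidth in Mbit/s: per-lane transfer rate times lane count
+ * (raw GT/s, ignoring encoding overhead)
+ */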
+ switch (speed) {
+ case PCIE_SPEED_2_5GT:
+ *pci_bw = 2500 * width;
+ break;
+ case PCIE_SPEED_5_0GT:
+ *pci_bw = 5000 * width;
+ break;
+ case PCIE_SPEED_8_0GT:
+ *pci_bw = 8000 * width;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
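+/* Enable CQE compression by default only when PCIe bandwidth is the
+ * likely bottleneck: below the port link speed and under 40Gb/s.
+ */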
+static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw)
+{
+ return (link_speed && pci_bw &&
+ (pci_bw < 40000) && (pci_bw < link_speed));
+}
+
+void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
+{
+ params->rx_cq_period_mode = cq_period_mode;
+
+ params->rx_cq_moderation.pkts =
+ MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
+ params->rx_cq_moderation.usec =
+ MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
+
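+ /* with CQE-based moderation the period timer restarts on each
+ * completion, so it gets its own default value
+ */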
+ if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
+ params->rx_cq_moderation.usec =
+ MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
+}
+
+static void mlx5e_query_min_inline(struct mlx5_core_dev *mdev,
+ u8 *min_inline_mode)
+{
+ switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
+ case MLX5E_INLINE_MODE_L2:
+ *min_inline_mode = MLX5_INLINE_MODE_L2;
+ break;
+ case MLX5E_INLINE_MODE_VPORT_CONTEXT:
+ mlx5_query_nic_vport_min_inline(mdev,
+ min_inline_mode);
+ break;
+ case MLX5_INLINE_MODE_NOT_REQUIRED:
+ *min_inline_mode = MLX5_INLINE_MODE_NONE;
+ break;
+ }
+}
+
+static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ u32 link_speed = 0;
+ u32 pci_bw = 0;
+ u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
+ MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
+ MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
priv->params.log_sq_size =
MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
- priv->params.log_rq_size =
- MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
- priv->params.rx_cq_moderation_usec =
- MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
- priv->params.rx_cq_moderation_pkts =
- MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
- priv->params.tx_cq_moderation_usec =
+ priv->params.rq_wq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) ?
+ MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
+ MLX5_WQ_TYPE_LINKED_LIST;
+
+ /* set CQE compression */
+ priv->params.rx_cqe_compress_admin = false;
+ if (MLX5_CAP_GEN(mdev, cqe_compression) &&
+ MLX5_CAP_GEN(mdev, vport_group_manager)) {
+ mlx5e_get_max_linkspeed(mdev, &link_speed);
+ mlx5e_get_pci_bw(mdev, &pci_bw);
+ mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n",
+ link_speed, pci_bw);
+ priv->params.rx_cqe_compress_admin =
+ cqe_compress_heuristic(link_speed, pci_bw);
+ }
+
+ priv->params.rx_cqe_compress = priv->params.rx_cqe_compress_admin;
+
+ switch (priv->params.rq_wq_type) {
+ case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
+ priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
+ priv->params.mpwqe_log_stride_sz =
+ priv->params.rx_cqe_compress ?
+ MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS :
+ MLX5_MPWRQ_LOG_STRIDE_SIZE;
+ priv->params.mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
+ priv->params.mpwqe_log_stride_sz;
+ priv->params.lro_en = true;
+ break;
+ default: /* MLX5_WQ_TYPE_LINKED_LIST */
+ priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
+ }
+
+ mlx5_core_info(mdev,
+ "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
+ priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
+ BIT(priv->params.log_rq_size),
+ BIT(priv->params.mpwqe_log_stride_sz),
+ priv->params.rx_cqe_compress_admin);
+
+ priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
+ BIT(priv->params.log_rq_size));
+
+ priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
+ mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode);
+
+ priv->params.tx_cq_moderation.usec =
MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
- priv->params.tx_cq_moderation_pkts =
+ priv->params.tx_cq_moderation.pkts =
MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
- priv->params.min_rx_wqes =
- MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
+ mlx5e_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
priv->params.num_tc = 1;
priv->params.rss_hfunc = ETH_RSS_HASH_XOR;
netdev_rss_key_fill(priv->params.toeplitz_hash_key,
sizeof(priv->params.toeplitz_hash_key));
- mlx5e_build_default_indir_rqt(priv->params.indirection_rqt,
- MLX5E_INDIR_RQT_SIZE, num_channels);
+ mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt,
+ MLX5E_INDIR_RQT_SIZE, profile->max_nch(mdev));
priv->params.lro_wqe_sz =
MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+ /* Initialize pflags */
+ MLX5E_SET_PRIV_FLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER,
+ priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
+
priv->mdev = mdev;
priv->netdev = netdev;
- priv->params.num_channels = num_channels;
+ priv->params.num_channels = profile->max_nch(mdev);
+ priv->profile = profile;
+ priv->ppriv = ppriv;
#ifdef CONFIG_MLX5_CORE_EN_DCB
mlx5e_ets_init(priv);
@@ -2371,6 +3080,7 @@ static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
+ INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
}
@@ -2386,10 +3096,16 @@ static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
}
}
-static void mlx5e_build_netdev(struct net_device *netdev)
+static const struct switchdev_ops mlx5e_switchdev_ops = {
+ .switchdev_port_attr_get = mlx5e_attr_get,
+};
+
+static void mlx5e_build_nic_netdev(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
+ bool fcs_supported;
+ bool fcs_enabled;
SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
@@ -2424,205 +3140,340 @@ static void mlx5e_build_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
if (mlx5e_vxlan_allowed(mdev)) {
- netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+ netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL;
netdev->hw_enc_features |= NETIF_F_IP_CSUM;
- netdev->hw_enc_features |= NETIF_F_RXCSUM;
+ netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
netdev->hw_enc_features |= NETIF_F_TSO;
netdev->hw_enc_features |= NETIF_F_TSO6;
- netdev->hw_enc_features |= NETIF_F_RXHASH;
netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
+ netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
+ NETIF_F_GSO_PARTIAL;
+ netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
}
+ mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
+
+ if (fcs_supported)
+ netdev->hw_features |= NETIF_F_RXALL;
+
netdev->features = netdev->hw_features;
if (!priv->params.lro_en)
netdev->features &= ~NETIF_F_LRO;
+ if (fcs_enabled)
+ netdev->features &= ~NETIF_F_RXALL;
+
#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
if (FT_CAP(flow_modify_en) &&
FT_CAP(modify_root) &&
FT_CAP(identified_miss_table_mode) &&
- FT_CAP(flow_table_modify))
- priv->netdev->hw_features |= NETIF_F_HW_TC;
+ FT_CAP(flow_table_modify)) {
+ netdev->hw_features |= NETIF_F_HW_TC;
+#ifdef CONFIG_RFS_ACCEL
+ netdev->hw_features |= NETIF_F_NTUPLE;
+#endif
+ }
netdev->features |= NETIF_F_HIGHDMA;
netdev->priv_flags |= IFF_UNICAST_FLT;
mlx5e_set_netdev_dev_addr(netdev);
+
+#ifdef CONFIG_NET_SWITCHDEV
+ if (MLX5_CAP_GEN(mdev, vport_group_manager))
+ netdev->switchdev_ops = &mlx5e_switchdev_ops;
+#endif
}
-static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
- struct mlx5_core_mkey *mkey)
+static void mlx5e_create_q_counter(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int err;
+
+ err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter);
+ if (err) {
+ mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
+ priv->q_counter = 0;
+ }
+}
+
+static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv)
+{
+ if (!priv->q_counter)
+ return;
+
+ mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
+}
+
+static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_create_mkey_mbox_in *in;
+ struct mlx5_mkey_seg *mkc;
+ int inlen = sizeof(*in);
+ u64 npages = MLX5E_REQUIRED_MTTS(priv->profile->max_nch(mdev),
+ BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW));
int err;
- in = mlx5_vzalloc(sizeof(*in));
+ in = mlx5_vzalloc(inlen);
if (!in)
return -ENOMEM;
- in->seg.flags = MLX5_PERM_LOCAL_WRITE |
- MLX5_PERM_LOCAL_READ |
- MLX5_ACCESS_MODE_PA;
- in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
- in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
+ mkc = &in->seg;
+ mkc->status = MLX5_MKEY_STATUS_FREE;
+ mkc->flags = MLX5_PERM_UMR_EN |
+ MLX5_PERM_LOCAL_READ |
+ MLX5_PERM_LOCAL_WRITE |
+ MLX5_ACCESS_MODE_MTT;
+
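+ /* cap npages at what a single mkey can describe; the bound below
+ * presumably tracks the hardware limit on MTT translation entries
+ */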
+ npages = min_t(u32, ALIGN(U16_MAX, 4) * 2, npages);
+
+ mkc->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
+ mkc->flags_pd = cpu_to_be32(mdev->mlx5e_res.pdn);
+ mkc->len = cpu_to_be64(npages << PAGE_SHIFT);
+ mkc->xlt_oct_size = cpu_to_be32(MLX5_MTT_OCTW(npages));
+ mkc->log2_page_size = PAGE_SHIFT;
- err = mlx5_core_create_mkey(mdev, mkey, in, sizeof(*in), NULL, NULL,
- NULL);
+ err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen, NULL,
+ NULL, NULL);
kvfree(in);
return err;
}
-static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
+static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv)
{
- struct net_device *netdev;
- struct mlx5e_priv *priv;
- int nch = mlx5e_get_max_num_channels(mdev);
- int err;
+ struct mlx5e_priv *priv = netdev_priv(netdev);
- if (mlx5e_check_required_hca_cap(mdev))
- return NULL;
+ mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv);
+ mlx5e_build_nic_netdev(netdev);
+ mlx5e_vxlan_init(priv);
+}
- netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
- nch * MLX5E_MAX_NUM_TC,
- nch);
- if (!netdev) {
- mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
- return NULL;
- }
+static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
- mlx5e_build_netdev_priv(mdev, netdev, nch);
- mlx5e_build_netdev(netdev);
+ mlx5e_vxlan_cleanup(priv);
- netif_carrier_off(netdev);
+ if (MLX5_CAP_GEN(mdev, vport_group_manager))
+ mlx5_eswitch_unregister_vport_rep(esw, 0);
+}
- priv = netdev_priv(netdev);
+static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int err;
+ int i;
- priv->wq = create_singlethread_workqueue("mlx5e");
- if (!priv->wq)
- goto err_free_netdev;
+ err = mlx5e_create_indirect_rqts(priv);
+ if (err) {
+ mlx5_core_warn(mdev, "create indirect rqts failed, %d\n", err);
+ return err;
+ }
- err = mlx5_alloc_map_uar(mdev, &priv->cq_uar, false);
+ err = mlx5e_create_direct_rqts(priv);
if (err) {
- mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
- goto err_destroy_wq;
+ mlx5_core_warn(mdev, "create direct rqts failed, %d\n", err);
+ goto err_destroy_indirect_rqts;
}
- err = mlx5_core_alloc_pd(mdev, &priv->pdn);
+ err = mlx5e_create_indirect_tirs(priv);
if (err) {
- mlx5_core_err(mdev, "alloc pd failed, %d\n", err);
- goto err_unmap_free_uar;
+ mlx5_core_warn(mdev, "create indirect tirs failed, %d\n", err);
+ goto err_destroy_direct_rqts;
}
- err = mlx5_core_alloc_transport_domain(mdev, &priv->tdn);
+ err = mlx5e_create_direct_tirs(priv);
if (err) {
- mlx5_core_err(mdev, "alloc td failed, %d\n", err);
- goto err_dealloc_pd;
+ mlx5_core_warn(mdev, "create direct tirs failed, %d\n", err);
+ goto err_destroy_indirect_tirs;
}
- err = mlx5e_create_mkey(priv, priv->pdn, &priv->mkey);
+ err = mlx5e_create_flow_steering(priv);
if (err) {
- mlx5_core_err(mdev, "create mkey failed, %d\n", err);
- goto err_dealloc_transport_domain;
+ mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
+ goto err_destroy_direct_tirs;
}
+ err = mlx5e_tc_init(priv);
+ if (err)
+ goto err_destroy_flow_steering;
+
+ return 0;
+
+err_destroy_flow_steering:
+ mlx5e_destroy_flow_steering(priv);
+err_destroy_direct_tirs:
+ mlx5e_destroy_direct_tirs(priv);
+err_destroy_indirect_tirs:
+ mlx5e_destroy_indirect_tirs(priv);
+err_destroy_direct_rqts:
+ for (i = 0; i < priv->profile->max_nch(mdev); i++)
+ mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
+err_destroy_indirect_rqts:
+ mlx5e_destroy_rqt(priv, &priv->indir_rqt);
+ return err;
+}
+
+static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
+{
+ int i;
+
+ mlx5e_tc_cleanup(priv);
+ mlx5e_destroy_flow_steering(priv);
+ mlx5e_destroy_direct_tirs(priv);
+ mlx5e_destroy_indirect_tirs(priv);
+ for (i = 0; i < priv->profile->max_nch(priv->mdev); i++)
+ mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
+ mlx5e_destroy_rqt(priv, &priv->indir_rqt);
+}
+
+static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
+{
+ int err;
+
err = mlx5e_create_tises(priv);
if (err) {
- mlx5_core_warn(mdev, "create tises failed, %d\n", err);
- goto err_destroy_mkey;
+ mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
+ return err;
}
- err = mlx5e_open_drop_rq(priv);
- if (err) {
- mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
- goto err_destroy_tises;
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ mlx5e_dcbnl_ieee_setets_core(priv, &priv->params.ets);
+#endif
+ return 0;
+}
+
+static void mlx5e_nic_enable(struct mlx5e_priv *priv)
+{
+ struct net_device *netdev = priv->netdev;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ struct mlx5_eswitch_rep rep;
+
+ if (mlx5e_vxlan_allowed(mdev)) {
+ rtnl_lock();
+ udp_tunnel_get_rx_info(netdev);
+ rtnl_unlock();
}
- err = mlx5e_create_rqt(priv, MLX5E_INDIRECTION_RQT);
- if (err) {
- mlx5_core_warn(mdev, "create rqt(INDIR) failed, %d\n", err);
- goto err_close_drop_rq;
+ mlx5e_enable_async_events(priv);
+ queue_work(priv->wq, &priv->set_rx_mode_work);
+
+ if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
+ mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
+ rep.load = mlx5e_nic_rep_load;
+ rep.unload = mlx5e_nic_rep_unload;
+ rep.vport = 0;
+ rep.priv_data = priv;
+ mlx5_eswitch_register_vport_rep(esw, &rep);
}
+}
- err = mlx5e_create_rqt(priv, MLX5E_SINGLE_RQ_RQT);
- if (err) {
- mlx5_core_warn(mdev, "create rqt(SINGLE) failed, %d\n", err);
- goto err_destroy_rqt_indir;
+static void mlx5e_nic_disable(struct mlx5e_priv *priv)
+{
+ queue_work(priv->wq, &priv->set_rx_mode_work);
+ mlx5e_disable_async_events(priv);
+}
+
+static const struct mlx5e_profile mlx5e_nic_profile = {
+ .init = mlx5e_nic_init,
+ .cleanup = mlx5e_nic_cleanup,
+ .init_rx = mlx5e_init_nic_rx,
+ .cleanup_rx = mlx5e_cleanup_nic_rx,
+ .init_tx = mlx5e_init_nic_tx,
+ .cleanup_tx = mlx5e_cleanup_nic_tx,
+ .enable = mlx5e_nic_enable,
+ .disable = mlx5e_nic_disable,
+ .update_stats = mlx5e_update_stats,
+ .max_nch = mlx5e_get_max_num_channels,
+ .max_tc = MLX5E_MAX_NUM_TC,
+};
+
+void *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
+ const struct mlx5e_profile *profile, void *ppriv)
+{
+ struct net_device *netdev;
+ struct mlx5e_priv *priv;
+ int nch = profile->max_nch(mdev);
+ int err;
+
+ netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
+ nch * profile->max_tc,
+ nch);
+ if (!netdev) {
+ mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
+ return NULL;
}
- err = mlx5e_create_tirs(priv);
+ profile->init(mdev, netdev, profile, ppriv);
+
+ netif_carrier_off(netdev);
+
+ priv = netdev_priv(netdev);
+
+ priv->wq = create_singlethread_workqueue("mlx5e");
+ if (!priv->wq)
+ goto err_free_netdev;
+
+ err = mlx5e_create_umr_mkey(priv);
if (err) {
- mlx5_core_warn(mdev, "create tirs failed, %d\n", err);
- goto err_destroy_rqt_single;
+ mlx5_core_err(mdev, "create umr mkey failed, %d\n", err);
+ goto err_destroy_wq;
}
- err = mlx5e_create_flow_tables(priv);
+ err = profile->init_tx(priv);
+ if (err)
+ goto err_destroy_umr_mkey;
+
+ err = mlx5e_open_drop_rq(priv);
if (err) {
- mlx5_core_warn(mdev, "create flow tables failed, %d\n", err);
- goto err_destroy_tirs;
+ mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
+ goto err_cleanup_tx;
}
- mlx5e_init_eth_addr(priv);
+ err = profile->init_rx(priv);
+ if (err)
+ goto err_close_drop_rq;
- mlx5e_vxlan_init(priv);
+ mlx5e_create_q_counter(priv);
- err = mlx5e_tc_init(priv);
- if (err)
- goto err_destroy_flow_tables;
+ mlx5e_init_l2_addr(priv);
-#ifdef CONFIG_MLX5_CORE_EN_DCB
- mlx5e_dcbnl_ieee_setets_core(priv, &priv->params.ets);
-#endif
+ mlx5e_set_dev_port_mtu(netdev);
err = register_netdev(netdev);
if (err) {
mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
- goto err_tc_cleanup;
+ goto err_dealloc_q_counters;
}
- if (mlx5e_vxlan_allowed(mdev))
- vxlan_get_rx_port(netdev);
-
- mlx5e_enable_async_events(priv);
- queue_work(priv->wq, &priv->set_rx_mode_work);
+ if (profile->enable)
+ profile->enable(priv);
return priv;
-err_tc_cleanup:
- mlx5e_tc_cleanup(priv);
-
-err_destroy_flow_tables:
- mlx5e_destroy_flow_tables(priv);
-
-err_destroy_tirs:
- mlx5e_destroy_tirs(priv);
-
-err_destroy_rqt_single:
- mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
-
-err_destroy_rqt_indir:
- mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
+err_dealloc_q_counters:
+ mlx5e_destroy_q_counter(priv);
+ profile->cleanup_rx(priv);
err_close_drop_rq:
mlx5e_close_drop_rq(priv);
-err_destroy_tises:
- mlx5e_destroy_tises(priv);
+err_cleanup_tx:
+ profile->cleanup_tx(priv);
-err_destroy_mkey:
- mlx5_core_destroy_mkey(mdev, &priv->mkey);
-
-err_dealloc_transport_domain:
- mlx5_core_dealloc_transport_domain(mdev, priv->tdn);
-
-err_dealloc_pd:
- mlx5_core_dealloc_pd(mdev, priv->pdn);
-
-err_unmap_free_uar:
- mlx5_unmap_free_uar(mdev, &priv->cq_uar);
+err_destroy_umr_mkey:
+ mlx5_core_destroy_mkey(mdev, &priv->umr_mkey);
err_destroy_wq:
destroy_workqueue(priv->wq);
@@ -2633,45 +3484,100 @@ err_free_netdev:
return NULL;
}
-static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
+static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
{
- struct mlx5e_priv *priv = vpriv;
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ int total_vfs = MLX5_TOTAL_VPORTS(mdev);
+ int vport;
+ u8 mac[ETH_ALEN];
+
+ if (!MLX5_CAP_GEN(mdev, vport_group_manager))
+ return;
+
+ mlx5_query_nic_vport_mac_address(mdev, 0, mac);
+
+ for (vport = 1; vport < total_vfs; vport++) {
+ struct mlx5_eswitch_rep rep;
+
+ rep.load = mlx5e_vport_rep_load;
+ rep.unload = mlx5e_vport_rep_unload;
+ rep.vport = vport;
+ ether_addr_copy(rep.hw_id, mac);
+ mlx5_eswitch_register_vport_rep(esw, &rep);
+ }
+}
+
+static void *mlx5e_add(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ void *ppriv = NULL;
+ void *ret;
+
+ if (mlx5e_check_required_hca_cap(mdev))
+ return NULL;
+
+ if (mlx5e_create_mdev_resources(mdev))
+ return NULL;
+
+ mlx5e_register_vport_rep(mdev);
+
+ if (MLX5_CAP_GEN(mdev, vport_group_manager))
+ ppriv = &esw->offloads.vport_reps[0];
+
+ ret = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, ppriv);
+ if (!ret) {
+ mlx5e_destroy_mdev_resources(mdev);
+ return NULL;
+ }
+ return ret;
+}
+
+void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv)
+{
+ const struct mlx5e_profile *profile = priv->profile;
struct net_device *netdev = priv->netdev;
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
+ if (profile->disable)
+ profile->disable(priv);
- queue_work(priv->wq, &priv->set_rx_mode_work);
- mlx5e_disable_async_events(priv);
flush_workqueue(priv->wq);
if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
netif_device_detach(netdev);
- mutex_lock(&priv->state_lock);
- if (test_bit(MLX5E_STATE_OPENED, &priv->state))
- mlx5e_close_locked(netdev);
- mutex_unlock(&priv->state_lock);
+ mlx5e_close(netdev);
} else {
unregister_netdev(netdev);
}
- mlx5e_tc_cleanup(priv);
- mlx5e_vxlan_cleanup(priv);
- mlx5e_destroy_flow_tables(priv);
- mlx5e_destroy_tirs(priv);
- mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
- mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
+ mlx5e_destroy_q_counter(priv);
+ profile->cleanup_rx(priv);
mlx5e_close_drop_rq(priv);
- mlx5e_destroy_tises(priv);
- mlx5_core_destroy_mkey(priv->mdev, &priv->mkey);
- mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn);
- mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
- mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
+ profile->cleanup_tx(priv);
+ mlx5_core_destroy_mkey(priv->mdev, &priv->umr_mkey);
cancel_delayed_work_sync(&priv->update_stats_work);
destroy_workqueue(priv->wq);
+ if (profile->cleanup)
+ profile->cleanup(priv);
if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state))
free_netdev(netdev);
}
+static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
+{
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ int total_vfs = MLX5_TOTAL_VPORTS(mdev);
+ struct mlx5e_priv *priv = vpriv;
+ int vport;
+
+ mlx5e_destroy_netdev(mdev, priv);
+
+ for (vport = 1; vport < total_vfs; vport++)
+ mlx5_eswitch_unregister_vport_rep(esw, vport);
+
+ mlx5e_destroy_mdev_resources(mdev);
+}
+
static void *mlx5e_get_netdev(void *vpriv)
{
struct mlx5e_priv *priv = vpriv;
@@ -2680,8 +3586,8 @@ static void *mlx5e_get_netdev(void *vpriv)
}
static struct mlx5_interface mlx5e_interface = {
- .add = mlx5e_create_netdev,
- .remove = mlx5e_destroy_netdev,
+ .add = mlx5e_add,
+ .remove = mlx5e_remove,
.event = mlx5e_async_event,
.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
.get_dev = mlx5e_get_netdev,
@@ -2689,6 +3595,7 @@ static struct mlx5_interface mlx5e_interface = {
void mlx5e_init(void)
{
+ mlx5e_build_ptys2ethtool_map();
mlx5_register_interface(&mlx5e_interface);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
new file mode 100644
index 000000000000..134de4a11f1d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -0,0 +1,431 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <generated/utsrelease.h>
+#include <linux/mlx5/fs.h>
+#include <net/switchdev.h>
+#include <net/pkt_cls.h>
+
+#include "eswitch.h"
+#include "en.h"
+#include "en_tc.h"
+
+static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
+
+static void mlx5e_rep_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
+ sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
+}
+
+static const struct counter_desc sw_rep_stats_desc[] = {
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
+};
+
+#define NUM_VPORT_REP_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
+
+static void mlx5e_rep_get_strings(struct net_device *dev,
+ u32 stringset, uint8_t *data)
+{
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < NUM_VPORT_REP_COUNTERS; i++)
+ strcpy(data + (i * ETH_GSTRING_LEN),
+ sw_rep_stats_desc[i].format);
+ break;
+ }
+}
+
+static void mlx5e_update_sw_rep_counters(struct mlx5e_priv *priv)
+{
+ struct mlx5e_sw_stats *s = &priv->stats.sw;
+ struct mlx5e_rq_stats *rq_stats;
+ struct mlx5e_sq_stats *sq_stats;
+ int i, j;
+
+ memset(s, 0, sizeof(*s));
+ for (i = 0; i < priv->params.num_channels; i++) {
+ rq_stats = &priv->channel[i]->rq.stats;
+
+ s->rx_packets += rq_stats->packets;
+ s->rx_bytes += rq_stats->bytes;
+
+ for (j = 0; j < priv->params.num_tc; j++) {
+ sq_stats = &priv->channel[i]->sq[j].stats;
+
+ s->tx_packets += sq_stats->packets;
+ s->tx_bytes += sq_stats->bytes;
+ }
+ }
+}
+
+static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int i;
+
+ if (!data)
+ return;
+
+ mutex_lock(&priv->state_lock);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_update_sw_rep_counters(priv);
+ mutex_unlock(&priv->state_lock);
+
+ for (i = 0; i < NUM_VPORT_REP_COUNTERS; i++)
+ data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
+ sw_rep_stats_desc, i);
+}
+
+static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return NUM_VPORT_REP_COUNTERS;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
+ .get_drvinfo = mlx5e_rep_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = mlx5e_rep_get_strings,
+ .get_sset_count = mlx5e_rep_get_sset_count,
+ .get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
+};
+
+int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_eswitch_rep *rep = priv->ppriv;
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+
+ if (esw->mode == SRIOV_NONE)
+ return -EOPNOTSUPP;
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
+ attr->u.ppid.id_len = ETH_ALEN;
+ ether_addr_copy(attr->u.ppid.id, rep->hw_id);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_eswitch_rep *rep = priv->ppriv;
+ struct mlx5e_channel *c;
+ int n, tc, err, num_sqs = 0;
+ u16 *sqs;
+
+ sqs = kcalloc(priv->params.num_channels * priv->params.num_tc, sizeof(u16), GFP_KERNEL);
+ if (!sqs)
+ return -ENOMEM;
+
+ for (n = 0; n < priv->params.num_channels; n++) {
+ c = priv->channel[n];
+ for (tc = 0; tc < c->num_tc; tc++)
+ sqs[num_sqs++] = c->sq[tc].sqn;
+ }
+
+ err = mlx5_eswitch_sqs2vport_start(esw, rep, sqs, num_sqs);
+
+ kfree(sqs);
+ return err;
+}
+
+int mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
+{
+ struct mlx5e_priv *priv = rep->priv_data;
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return mlx5e_add_sqs_fwd_rules(priv);
+ return 0;
+}
+
+void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_eswitch_rep *rep = priv->ppriv;
+
+ mlx5_eswitch_sqs2vport_stop(esw, rep);
+}
+
+void mlx5e_nic_rep_unload(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep)
+{
+ struct mlx5e_priv *priv = rep->priv_data;
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_remove_sqs_fwd_rules(priv);
+
+ /* clean (and re-init) existing uplink offloaded TC rules */
+ mlx5e_tc_cleanup(priv);
+ mlx5e_tc_init(priv);
+}
+
+static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
+ char *buf, size_t len)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_eswitch_rep *rep = priv->ppriv;
+ int ret;
+
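+ /* VF representors sit on vports 1..n, so vport - 1 is the VF index */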
+ ret = snprintf(buf, len, "%d", rep->vport - 1);
+ if (ret >= len)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int mlx5e_rep_ndo_setup_tc(struct net_device *dev, u32 handle,
+ __be16 proto, struct tc_to_netdev *tc)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
+ return -EOPNOTSUPP;
+
+ switch (tc->type) {
+ case TC_SETUP_CLSFLOWER:
+ switch (tc->cls_flower->command) {
+ case TC_CLSFLOWER_REPLACE:
+ return mlx5e_configure_flower(priv, proto, tc->cls_flower);
+ case TC_CLSFLOWER_DESTROY:
+ return mlx5e_delete_flower(priv, tc->cls_flower);
+ case TC_CLSFLOWER_STATS:
+ return mlx5e_stats_flower(priv, tc->cls_flower);
+ }
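+		/* an unhandled flower command falls through to -EOPNOTSUPP */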
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct switchdev_ops mlx5e_rep_switchdev_ops = {
+ .switchdev_port_attr_get = mlx5e_attr_get,
+};
+
+static const struct net_device_ops mlx5e_netdev_ops_rep = {
+ .ndo_open = mlx5e_open,
+ .ndo_stop = mlx5e_close,
+ .ndo_start_xmit = mlx5e_xmit,
+ .ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name,
+ .ndo_setup_tc = mlx5e_rep_ndo_setup_tc,
+ .ndo_get_stats64 = mlx5e_get_stats,
+};
+
+static void mlx5e_build_rep_netdev_priv(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
+ MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
+ MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+
+ priv->params.log_sq_size =
+ MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
+ priv->params.rq_wq_type = MLX5_WQ_TYPE_LINKED_LIST;
+ priv->params.log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
+
+ priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
+ BIT(priv->params.log_rq_size));
+
+ priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
+ mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode);
+
+ priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
+ priv->params.num_tc = 1;
+
+ priv->params.lro_wqe_sz =
+ MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+
+ priv->mdev = mdev;
+ priv->netdev = netdev;
+ priv->params.num_channels = profile->max_nch(mdev);
+ priv->profile = profile;
+ priv->ppriv = ppriv;
+
+ mutex_init(&priv->state_lock);
+
+ INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
+}
+
+static void mlx5e_build_rep_netdev(struct net_device *netdev)
+{
+ netdev->netdev_ops = &mlx5e_netdev_ops_rep;
+
+ netdev->watchdog_timeo = 15 * HZ;
+
+ netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
+
+#ifdef CONFIG_NET_SWITCHDEV
+ netdev->switchdev_ops = &mlx5e_rep_switchdev_ops;
+#endif
+
+ netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC;
+ netdev->hw_features |= NETIF_F_HW_TC;
+
+ eth_hw_addr_random(netdev);
+}
+
+static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv)
+{
+ mlx5e_build_rep_netdev_priv(mdev, netdev, profile, ppriv);
+ mlx5e_build_rep_netdev(netdev);
+}
+
+static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_eswitch_rep *rep = priv->ppriv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_flow_rule *flow_rule;
+ int err;
+ int i;
+
+ err = mlx5e_create_direct_rqts(priv);
+ if (err) {
+ mlx5_core_warn(mdev, "create direct rqts failed, %d\n", err);
+ return err;
+ }
+
+ err = mlx5e_create_direct_tirs(priv);
+ if (err) {
+ mlx5_core_warn(mdev, "create direct tirs failed, %d\n", err);
+ goto err_destroy_direct_rqts;
+ }
+
+ flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
+ rep->vport,
+ priv->direct_tir[0].tirn);
+ if (IS_ERR(flow_rule)) {
+ err = PTR_ERR(flow_rule);
+ goto err_destroy_direct_tirs;
+ }
+ rep->vport_rx_rule = flow_rule;
+
+ err = mlx5e_tc_init(priv);
+ if (err)
+ goto err_del_flow_rule;
+
+ return 0;
+
+err_del_flow_rule:
+ mlx5_del_flow_rule(rep->vport_rx_rule);
+err_destroy_direct_tirs:
+ mlx5e_destroy_direct_tirs(priv);
+err_destroy_direct_rqts:
+ for (i = 0; i < priv->params.num_channels; i++)
+ mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
+ return err;
+}
+
+static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch_rep *rep = priv->ppriv;
+ int i;
+
+ mlx5e_tc_cleanup(priv);
+ mlx5_del_flow_rule(rep->vport_rx_rule);
+ mlx5e_destroy_direct_tirs(priv);
+ for (i = 0; i < priv->params.num_channels; i++)
+ mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
+}
+
+static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
+{
+ int err;
+
+ err = mlx5e_create_tises(priv);
+ if (err) {
+ mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
+ return err;
+ }
+ return 0;
+}
+
+static int mlx5e_get_rep_max_num_channels(struct mlx5_core_dev *mdev)
+{
+#define MLX5E_PORT_REPRESENTOR_NCH 1
+ return MLX5E_PORT_REPRESENTOR_NCH;
+}
+
+static struct mlx5e_profile mlx5e_rep_profile = {
+ .init = mlx5e_init_rep,
+ .init_rx = mlx5e_init_rep_rx,
+ .cleanup_rx = mlx5e_cleanup_rep_rx,
+ .init_tx = mlx5e_init_rep_tx,
+ .cleanup_tx = mlx5e_cleanup_nic_tx,
+ .update_stats = mlx5e_update_sw_rep_counters,
+ .max_nch = mlx5e_get_rep_max_num_channels,
+ .max_tc = 1,
+};
+
+int mlx5e_vport_rep_load(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep)
+{
+ rep->priv_data = mlx5e_create_netdev(esw->dev, &mlx5e_rep_profile, rep);
+ if (!rep->priv_data) {
+ pr_warn("Failed to create representor for vport %d\n",
+ rep->vport);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+void mlx5e_vport_rep_unload(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep)
+{
+ struct mlx5e_priv *priv = rep->priv_data;
+
+ mlx5e_destroy_netdev(esw->dev, priv);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 58d4e2f962c3..e7c969df3dad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -42,13 +42,149 @@ static inline bool mlx5e_rx_hw_stamp(struct mlx5e_tstamp *tstamp)
return tstamp->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL;
}
-static inline int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
- struct mlx5e_rx_wqe *wqe, u16 ix)
+static inline void mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cqcc,
+ void *data)
+{
+ u32 ci = cqcc & cq->wq.sz_m1;
+
+ memcpy(data, mlx5_cqwq_get_wqe(&cq->wq, ci), sizeof(struct mlx5_cqe64));
+}
+
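+/* A compressed CQE session starts with a "title" CQE carrying the fields
+ * shared by the whole session, followed by arrays of up to
+ * MLX5_MINI_CQE_ARRAY_SIZE mini CQEs holding the per-packet byte count
+ * and checksum; the helpers below expand them back into regular CQEs in
+ * place.
+ */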
+static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
+ struct mlx5e_cq *cq, u32 cqcc)
+{
+ mlx5e_read_cqe_slot(cq, cqcc, &cq->title);
+ cq->decmprs_left = be32_to_cpu(cq->title.byte_cnt);
+ cq->decmprs_wqe_counter = be16_to_cpu(cq->title.wqe_counter);
+ rq->stats.cqe_compress_blks++;
+}
+
+static inline void mlx5e_read_mini_arr_slot(struct mlx5e_cq *cq, u32 cqcc)
+{
+ mlx5e_read_cqe_slot(cq, cqcc, cq->mini_arr);
+ cq->mini_arr_idx = 0;
+}
+
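+/* Rewrite the ownership bits of the n CQE slots just consumed,
+ * flipping the expected phase for any slots past a queue wrap-around.
+ */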
+static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n)
+{
+ u8 op_own = (cqcc >> cq->wq.log_sz) & 1;
+ u32 wq_sz = 1 << cq->wq.log_sz;
+ u32 ci = cqcc & cq->wq.sz_m1;
+ u32 ci_top = min_t(u32, wq_sz, ci + n);
+
+ for (; ci < ci_top; ci++, n--) {
+ struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, ci);
+
+ cqe->op_own = op_own;
+ }
+
+ if (unlikely(ci == wq_sz)) {
+ op_own = !op_own;
+ for (ci = 0; ci < n; ci++) {
+ struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, ci);
+
+ cqe->op_own = op_own;
+ }
+ }
+}
+
+static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
+ struct mlx5e_cq *cq, u32 cqcc)
+{
+ u16 wqe_cnt_step;
+
+ cq->title.byte_cnt = cq->mini_arr[cq->mini_arr_idx].byte_cnt;
+ cq->title.check_sum = cq->mini_arr[cq->mini_arr_idx].checksum;
+ cq->title.op_own &= 0xf0;
+ cq->title.op_own |= 0x01 & (cqcc >> cq->wq.log_sz);
+ cq->title.wqe_counter = cpu_to_be16(cq->decmprs_wqe_counter);
+
+ wqe_cnt_step =
+ rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ ?
+ mpwrq_get_cqe_consumed_strides(&cq->title) : 1;
+ cq->decmprs_wqe_counter =
+ (cq->decmprs_wqe_counter + wqe_cnt_step) & rq->wq.sz_m1;
+}
+
+static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
+ struct mlx5e_cq *cq, u32 cqcc)
+{
+ mlx5e_decompress_cqe(rq, cq, cqcc);
+ cq->title.rss_hash_type = 0;
+ cq->title.rss_hash_result = 0;
+}
+
+static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
+ struct mlx5e_cq *cq,
+ int update_owner_only,
+ int budget_rem)
+{
+ u32 cqcc = cq->wq.cc + update_owner_only;
+ u32 cqe_count;
+ u32 i;
+
+ cqe_count = min_t(u32, cq->decmprs_left, budget_rem);
+
+ for (i = update_owner_only; i < cqe_count;
+ i++, cq->mini_arr_idx++, cqcc++) {
+ if (cq->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE)
+ mlx5e_read_mini_arr_slot(cq, cqcc);
+
+ mlx5e_decompress_cqe_no_hash(rq, cq, cqcc);
+ rq->handle_rx_cqe(rq, &cq->title);
+ }
+ mlx5e_cqes_update_owner(cq, cq->wq.cc, cqcc - cq->wq.cc);
+ cq->wq.cc = cqcc;
+ cq->decmprs_left -= cqe_count;
+ rq->stats.cqe_compress_pkts += cqe_count;
+
+ return cqe_count;
+}
+
+static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
+ struct mlx5e_cq *cq,
+ int budget_rem)
+{
+ mlx5e_read_title_slot(rq, cq, cq->wq.cc);
+ mlx5e_read_mini_arr_slot(cq, cq->wq.cc + 1);
+ mlx5e_decompress_cqe(rq, cq, cq->wq.cc);
+ rq->handle_rx_cqe(rq, &cq->title);
+ cq->mini_arr_idx++;
+
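+ /* minus one: the caller's poll loop already counts one completion
+ * for this iteration
+ */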
+ return mlx5e_decompress_cqes_cont(rq, cq, 1, budget_rem) - 1;
+}
+
+void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val)
+{
+ bool was_opened;
+
+ if (!MLX5_CAP_GEN(priv->mdev, cqe_compression))
+ return;
+
+ mutex_lock(&priv->state_lock);
+
+ if (priv->params.rx_cqe_compress == val)
+ goto unlock;
+
+ was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (was_opened)
+ mlx5e_close_locked(priv->netdev);
+
+ priv->params.rx_cqe_compress = val;
+
+ if (was_opened)
+ mlx5e_open_locked(priv->netdev);
+
+unlock:
+ mutex_unlock(&priv->state_lock);
+}
+
+int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
{
struct sk_buff *skb;
dma_addr_t dma_addr;
- skb = netdev_alloc_skb(rq->netdev, rq->wqe_sz);
+ skb = napi_alloc_skb(rq->cq.napi, rq->wqe_sz);
if (unlikely(!skb))
return -ENOMEM;
@@ -62,10 +198,9 @@ static inline int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
if (unlikely(dma_mapping_error(rq->pdev, dma_addr)))
goto err_free_skb;
- skb_reserve(skb, MLX5E_NET_IP_ALIGN);
-
*((dma_addr_t *)skb->cb) = dma_addr;
- wqe->data.addr = cpu_to_be64(dma_addr + MLX5E_NET_IP_ALIGN);
+ wqe->data.addr = cpu_to_be64(dma_addr);
+ wqe->data.lkey = rq->mkey_be;
rq->skb[ix] = skb;
@@ -77,18 +212,416 @@ err_free_skb:
return -ENOMEM;
}
+void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
+{
+ struct sk_buff *skb = rq->skb[ix];
+
+ if (skb) {
+ rq->skb[ix] = NULL;
+ dma_unmap_single(rq->pdev,
+ *((dma_addr_t *)skb->cb),
+ rq->wqe_sz,
+ DMA_FROM_DEVICE);
+ dev_kfree_skb(skb);
+ }
+}
+
+static inline int mlx5e_mpwqe_strides_per_page(struct mlx5e_rq *rq)
+{
+ return rq->mpwqe_num_strides >> MLX5_MPWRQ_WQE_PAGE_ORDER;
+}
+
+static inline void
+mlx5e_dma_pre_sync_linear_mpwqe(struct device *pdev,
+ struct mlx5e_mpw_info *wi,
+ u32 wqe_offset, u32 len)
+{
+ dma_sync_single_for_cpu(pdev, wi->dma_info.addr + wqe_offset,
+ len, DMA_FROM_DEVICE);
+}
+
+static inline void
+mlx5e_dma_pre_sync_fragmented_mpwqe(struct device *pdev,
+ struct mlx5e_mpw_info *wi,
+ u32 wqe_offset, u32 len)
+{
+ /* No dma pre sync for fragmented MPWQE */
+}
+
+static inline void
+mlx5e_add_skb_frag_linear_mpwqe(struct mlx5e_rq *rq,
+ struct sk_buff *skb,
+ struct mlx5e_mpw_info *wi,
+ u32 page_idx, u32 frag_offset,
+ u32 len)
+{
+ unsigned int truesize = ALIGN(len, rq->mpwqe_stride_sz);
+
+ wi->skbs_frags[page_idx]++;
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ &wi->dma_info.page[page_idx], frag_offset,
+ len, truesize);
+}
+
+static inline void
+mlx5e_add_skb_frag_fragmented_mpwqe(struct mlx5e_rq *rq,
+ struct sk_buff *skb,
+ struct mlx5e_mpw_info *wi,
+ u32 page_idx, u32 frag_offset,
+ u32 len)
+{
+ unsigned int truesize = ALIGN(len, rq->mpwqe_stride_sz);
+
+ dma_sync_single_for_cpu(rq->pdev,
+ wi->umr.dma_info[page_idx].addr + frag_offset,
+ len, DMA_FROM_DEVICE);
+ wi->skbs_frags[page_idx]++;
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ wi->umr.dma_info[page_idx].page, frag_offset,
+ len, truesize);
+}
+
+static inline void
+mlx5e_copy_skb_header_linear_mpwqe(struct device *pdev,
+ struct sk_buff *skb,
+ struct mlx5e_mpw_info *wi,
+ u32 page_idx, u32 offset,
+ u32 headlen)
+{
+ struct page *page = &wi->dma_info.page[page_idx];
+
+ skb_copy_to_linear_data(skb, page_address(page) + offset,
+ ALIGN(headlen, sizeof(long)));
+}
+
+static inline void
+mlx5e_copy_skb_header_fragmented_mpwqe(struct device *pdev,
+ struct sk_buff *skb,
+ struct mlx5e_mpw_info *wi,
+ u32 page_idx, u32 offset,
+ u32 headlen)
+{
+ u16 headlen_pg = min_t(u32, headlen, PAGE_SIZE - offset);
+ struct mlx5e_dma_info *dma_info = &wi->umr.dma_info[page_idx];
+ unsigned int len;
+
+ /* Aligning len to sizeof(long) optimizes memcpy performance */
+ len = ALIGN(headlen_pg, sizeof(long));
+ dma_sync_single_for_cpu(pdev, dma_info->addr + offset, len,
+ DMA_FROM_DEVICE);
+ skb_copy_to_linear_data_offset(skb, 0,
+ page_address(dma_info->page) + offset,
+ len);
+ if (unlikely(offset + headlen > PAGE_SIZE)) {
+ dma_info++;
+ headlen_pg = len;
+ len = ALIGN(headlen - headlen_pg, sizeof(long));
+ dma_sync_single_for_cpu(pdev, dma_info->addr, len,
+ DMA_FROM_DEVICE);
+ skb_copy_to_linear_data_offset(skb, headlen_pg,
+ page_address(dma_info->page),
+ len);
+ }
+}
+
+static u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix)
+{
+ return rq->mpwqe_mtt_offset +
+ wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
+}
+
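+/* Build the UMR WQE that maps a striding-RQ WQE's pages through the MTT
+ * array; it is posted on the channel's internal control SQ (icosq).
+ */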
+static void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
+ struct mlx5e_sq *sq,
+ struct mlx5e_umr_wqe *wqe,
+ u16 ix)
+{
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+ struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
+ struct mlx5_wqe_data_seg *dseg = &wqe->data;
+ struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
+ u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
+ u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix);
+
+ memset(wqe, 0, sizeof(*wqe));
+ cseg->opmod_idx_opcode =
+ cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
+ MLX5_OPCODE_UMR);
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
+ ds_cnt);
+ cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+ cseg->imm = rq->umr_mkey_be;
+
+ ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
+ ucseg->klm_octowords =
+ cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
+ ucseg->bsf_octowords =
+ cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset));
+ ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
+
+ dseg->lkey = sq->mkey_be;
+ dseg->addr = cpu_to_be64(wi->umr.mtt_addr);
+}
+
+static void mlx5e_post_umr_wqe(struct mlx5e_rq *rq, u16 ix)
+{
+ struct mlx5e_sq *sq = &rq->channel->icosq;
+ struct mlx5_wq_cyc *wq = &sq->wq;
+ struct mlx5e_umr_wqe *wqe;
+ u8 num_wqebbs = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_BB);
+ u16 pi;
+
+ /* fill the SQ edge with NOPs so a WQE never wraps around the queue */
+ while ((pi = (sq->pc & wq->sz_m1)) > sq->edge) {
+ sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP;
+ sq->ico_wqe_info[pi].num_wqebbs = 1;
+ mlx5e_send_nop(sq, true);
+ }
+
+ wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+ mlx5e_build_umr_wqe(rq, sq, wqe, ix);
+ sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_UMR;
+ sq->ico_wqe_info[pi].num_wqebbs = num_wqebbs;
+ sq->pc += num_wqebbs;
+ mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
+}
+
+static inline int mlx5e_get_wqe_mtt_sz(void)
+{
+ /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
+ * To avoid copying garbage after the mtt array, we allocate
+ * a little more.
+ */
+ return ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(__be64),
+ MLX5_UMR_MTT_ALIGNMENT);
+}
+
+static int mlx5e_alloc_and_map_page(struct mlx5e_rq *rq,
+ struct mlx5e_mpw_info *wi,
+ int i)
+{
+ struct page *page;
+
+ page = dev_alloc_page();
+ if (unlikely(!page))
+ return -ENOMEM;
+
+ wi->umr.dma_info[i].page = page;
+ wi->umr.dma_info[i].addr = dma_map_page(rq->pdev, page, 0, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ if (unlikely(dma_mapping_error(rq->pdev, wi->umr.dma_info[i].addr))) {
+ put_page(page);
+ return -ENOMEM;
+ }
+ wi->umr.mtt[i] = cpu_to_be64(wi->umr.dma_info[i].addr | MLX5_EN_WR);
+
+ return 0;
+}
+
+static int mlx5e_alloc_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
+ struct mlx5e_rx_wqe *wqe,
+ u16 ix)
+{
+ struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
+ int mtt_sz = mlx5e_get_wqe_mtt_sz();
+ u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, ix) << PAGE_SHIFT;
+ int i;
+
+ wi->umr.dma_info = kmalloc(sizeof(*wi->umr.dma_info) *
+ MLX5_MPWRQ_PAGES_PER_WQE,
+ GFP_ATOMIC);
+ if (unlikely(!wi->umr.dma_info))
+ goto err_out;
+
+ /* We allocate more than mtt_sz as we will align the pointer */
+ wi->umr.mtt_no_align = kzalloc(mtt_sz + MLX5_UMR_ALIGN - 1,
+ GFP_ATOMIC);
+ if (unlikely(!wi->umr.mtt_no_align))
+ goto err_free_umr;
+
+ wi->umr.mtt = PTR_ALIGN(wi->umr.mtt_no_align, MLX5_UMR_ALIGN);
+ wi->umr.mtt_addr = dma_map_single(rq->pdev, wi->umr.mtt, mtt_sz,
+ PCI_DMA_TODEVICE);
+ if (unlikely(dma_mapping_error(rq->pdev, wi->umr.mtt_addr)))
+ goto err_free_mtt;
+
+ for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
+ if (unlikely(mlx5e_alloc_and_map_page(rq, wi, i)))
+ goto err_unmap;
+ page_ref_add(wi->umr.dma_info[i].page,
+ mlx5e_mpwqe_strides_per_page(rq));
+ wi->skbs_frags[i] = 0;
+ }
+
+ wi->consumed_strides = 0;
+ wi->dma_pre_sync = mlx5e_dma_pre_sync_fragmented_mpwqe;
+ wi->add_skb_frag = mlx5e_add_skb_frag_fragmented_mpwqe;
+ wi->copy_skb_header = mlx5e_copy_skb_header_fragmented_mpwqe;
+ wi->free_wqe = mlx5e_free_rx_fragmented_mpwqe;
+ wqe->data.lkey = rq->umr_mkey_be;
+ wqe->data.addr = cpu_to_be64(dma_offset);
+
+ return 0;
+
+err_unmap:
+ while (--i >= 0) {
+ dma_unmap_page(rq->pdev, wi->umr.dma_info[i].addr, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ page_ref_sub(wi->umr.dma_info[i].page,
+ mlx5e_mpwqe_strides_per_page(rq));
+ put_page(wi->umr.dma_info[i].page);
+ }
+ dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz, PCI_DMA_TODEVICE);
+
+err_free_mtt:
+ kfree(wi->umr.mtt_no_align);
+
+err_free_umr:
+ kfree(wi->umr.dma_info);
+
+err_out:
+ return -ENOMEM;
+}
+
+void mlx5e_free_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
+ struct mlx5e_mpw_info *wi)
+{
+ int mtt_sz = mlx5e_get_wqe_mtt_sz();
+ int i;
+
+ for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
+ dma_unmap_page(rq->pdev, wi->umr.dma_info[i].addr, PAGE_SIZE,
+ PCI_DMA_FROMDEVICE);
+ page_ref_sub(wi->umr.dma_info[i].page,
+ mlx5e_mpwqe_strides_per_page(rq) - wi->skbs_frags[i]);
+ put_page(wi->umr.dma_info[i].page);
+ }
+ dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz, PCI_DMA_TODEVICE);
+ kfree(wi->umr.mtt_no_align);
+ kfree(wi->umr.dma_info);
+}
+
+void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq)
+{
+ struct mlx5_wq_ll *wq = &rq->wq;
+ struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
+
+ clear_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
+
+ if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state))) {
+ mlx5e_free_rx_fragmented_mpwqe(rq, &rq->wqe_info[wq->head]);
+ return;
+ }
+
+ mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
+ rq->stats.mpwqe_frag++;
+
+ /* ensure wqes are visible to device before updating doorbell record */
+ dma_wmb();
+
+ mlx5_wq_ll_update_db_record(wq);
+}
+
+static int mlx5e_alloc_rx_linear_mpwqe(struct mlx5e_rq *rq,
+ struct mlx5e_rx_wqe *wqe,
+ u16 ix)
+{
+ struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
+ gfp_t gfp_mask;
+ int i;
+
+ gfp_mask = GFP_ATOMIC | __GFP_COLD | __GFP_MEMALLOC;
+ wi->dma_info.page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
+ MLX5_MPWRQ_WQE_PAGE_ORDER);
+ if (unlikely(!wi->dma_info.page))
+ return -ENOMEM;
+
+ wi->dma_info.addr = dma_map_page(rq->pdev, wi->dma_info.page, 0,
+ rq->wqe_sz, PCI_DMA_FROMDEVICE);
+ if (unlikely(dma_mapping_error(rq->pdev, wi->dma_info.addr))) {
+ put_page(wi->dma_info.page);
+ return -ENOMEM;
+ }
+
+ /* We split the high-order page into order-0 ones and manage their
+ * reference counter to minimize the memory held by small skb fragments
+ */
+ split_page(wi->dma_info.page, MLX5_MPWRQ_WQE_PAGE_ORDER);
+ for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
+ page_ref_add(&wi->dma_info.page[i],
+ mlx5e_mpwqe_strides_per_page(rq));
+ wi->skbs_frags[i] = 0;
+ }
+
+ wi->consumed_strides = 0;
+ wi->dma_pre_sync = mlx5e_dma_pre_sync_linear_mpwqe;
+ wi->add_skb_frag = mlx5e_add_skb_frag_linear_mpwqe;
+ wi->copy_skb_header = mlx5e_copy_skb_header_linear_mpwqe;
+ wi->free_wqe = mlx5e_free_rx_linear_mpwqe;
+ wqe->data.lkey = rq->mkey_be;
+ wqe->data.addr = cpu_to_be64(wi->dma_info.addr);
+
+ return 0;
+}
+
+void mlx5e_free_rx_linear_mpwqe(struct mlx5e_rq *rq,
+ struct mlx5e_mpw_info *wi)
+{
+ int i;
+
+ dma_unmap_page(rq->pdev, wi->dma_info.addr, rq->wqe_sz,
+ PCI_DMA_FROMDEVICE);
+ for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) {
+ page_ref_sub(&wi->dma_info.page[i],
+ mlx5e_mpwqe_strides_per_page(rq) - wi->skbs_frags[i]);
+ put_page(&wi->dma_info.page[i]);
+ }
+}
+
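+/* Try the cheap linear (high-order page) allocation first and fall back
+ * to the fragmented UMR-mapped scheme; the fallback posts a UMR WQE and
+ * returns -EBUSY until the mapping completes.
+ */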
+int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
+{
+ int err;
+
+ err = mlx5e_alloc_rx_linear_mpwqe(rq, wqe, ix);
+ if (unlikely(err)) {
+ err = mlx5e_alloc_rx_fragmented_mpwqe(rq, wqe, ix);
+ if (unlikely(err))
+ return err;
+ set_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
+ mlx5e_post_umr_wqe(rq, ix);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
+{
+ struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
+
+ wi->free_wqe(rq, wi);
+}
+
+#define RQ_CANNOT_POST(rq) \
+ (test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state) || \
+ test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
+
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
{
struct mlx5_wq_ll *wq = &rq->wq;
- if (unlikely(!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state)))
+ if (unlikely(RQ_CANNOT_POST(rq)))
return false;
while (!mlx5_wq_ll_is_full(wq)) {
struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
+ int err;
- if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, wq->head)))
+ err = rq->alloc_wqe(rq, wqe, wq->head);
+ if (unlikely(err)) {
+ if (err != -EBUSY)
+ rq->stats.buff_alloc_err++;
break;
+ }
mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
}
@@ -101,26 +634,35 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
return !mlx5_wq_ll_is_full(wq);
}
-static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe)
+static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
+ u32 cqe_bcnt)
{
- struct ethhdr *eth = (struct ethhdr *)(skb->data);
- struct iphdr *ipv4 = (struct iphdr *)(skb->data + ETH_HLEN);
- struct ipv6hdr *ipv6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
+ struct ethhdr *eth = (struct ethhdr *)(skb->data);
+ struct iphdr *ipv4;
+ struct ipv6hdr *ipv6;
struct tcphdr *tcp;
+ int network_depth = 0;
+ __be16 proto;
+ u16 tot_len;
u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) ||
(CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));
- u16 tot_len = be32_to_cpu(cqe->byte_cnt) - ETH_HLEN;
+ skb->mac_len = ETH_HLEN;
+ proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
+
+ ipv4 = (struct iphdr *)(skb->data + network_depth);
+ ipv6 = (struct ipv6hdr *)(skb->data + network_depth);
+ tot_len = cqe_bcnt - network_depth;
- if (eth->h_proto == htons(ETH_P_IP)) {
- tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
+ if (proto == htons(ETH_P_IP)) {
+ tcp = (struct tcphdr *)(skb->data + network_depth +
sizeof(struct iphdr));
ipv6 = NULL;
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
} else {
- tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
+ tcp = (struct tcphdr *)(skb->data + network_depth +
sizeof(struct ipv6hdr));
ipv4 = NULL;
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
@@ -176,35 +718,43 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
if (lro) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else if (likely(is_first_ethertype_ip(skb))) {
+ return;
+ }
+
+ if (is_first_ethertype_ip(skb)) {
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
- rq->stats.csum_sw++;
- } else {
- goto csum_none;
+ rq->stats.csum_complete++;
+ return;
}
- return;
-
+ if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
+ (cqe->hds_ip_ext & CQE_L4_OK))) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (cqe_is_tunneled(cqe)) {
+ skb->csum_level = 1;
+ skb->encapsulation = 1;
+ rq->stats.csum_unnecessary_inner++;
+ }
+ return;
+ }
csum_none:
skb->ip_summed = CHECKSUM_NONE;
rq->stats.csum_none++;
}
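
/*
 * Minimal sketch of the checksum-decision ladder above, as a standalone
 * predicate. The boolean parameters are stand-ins for the driver's skb and
 * CQE queries; the initial "RX checksumming disabled" check is assumed to
 * sit in context lines not shown in this hunk.
 */
enum csum_verdict { VERDICT_NONE, VERDICT_UNNECESSARY, VERDICT_COMPLETE };

static enum csum_verdict classify_csum(int rxcsum_on, int lro,
				       int first_ethertype_ip,
				       int l3_ok, int l4_ok)
{
	if (!rxcsum_on)
		return VERDICT_NONE;
	if (lro)			/* LRO result is already verified */
		return VERDICT_UNNECESSARY;
	if (first_ethertype_ip)		/* full checksum is usable as-is */
		return VERDICT_COMPLETE;
	if (l3_ok && l4_ok)		/* HW validated both layers */
		return VERDICT_UNNECESSARY;
	return VERDICT_NONE;
}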
static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
+ u32 cqe_bcnt,
struct mlx5e_rq *rq,
struct sk_buff *skb)
{
struct net_device *netdev = rq->netdev;
- u32 cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
struct mlx5e_tstamp *tstamp = rq->tstamp;
int lro_num_seg;
- skb_put(skb, cqe_bcnt);
-
lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
if (lro_num_seg > 1) {
- mlx5e_lro_update_hdr(skb, cqe);
+ mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
rq->stats.lro_packets++;
rq->stats.lro_bytes += cqe_bcnt;
@@ -213,10 +763,6 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
mlx5e_fill_hwstamp(tstamp, get_cqe_ts(cqe), skb_hwtstamps(skb));
- mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
-
- skb->protocol = eth_type_trans(skb, netdev);
-
skb_record_rx_queue(skb, rq->ix);
if (likely(netdev->features & NETIF_F_RXHASH))
@@ -227,52 +773,168 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
be16_to_cpu(cqe->vlan_info));
skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;
+
+ mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
+ skb->protocol = eth_type_trans(skb, netdev);
+}
+
+static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
+ struct mlx5_cqe64 *cqe,
+ u32 cqe_bcnt,
+ struct sk_buff *skb)
+{
+ rq->stats.packets++;
+ rq->stats.bytes += cqe_bcnt;
+ mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
+ napi_gro_receive(rq->cq.napi, skb);
+}
+
+void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+ struct mlx5e_rx_wqe *wqe;
+ struct sk_buff *skb;
+ __be16 wqe_counter_be;
+ u16 wqe_counter;
+ u32 cqe_bcnt;
+
+ wqe_counter_be = cqe->wqe_counter;
+ wqe_counter = be16_to_cpu(wqe_counter_be);
+ wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
+ skb = rq->skb[wqe_counter];
+ prefetch(skb->data);
+ rq->skb[wqe_counter] = NULL;
+
+ dma_unmap_single(rq->pdev,
+ *((dma_addr_t *)skb->cb),
+ rq->wqe_sz,
+ DMA_FROM_DEVICE);
+
+ if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+ rq->stats.wqe_err++;
+ dev_kfree_skb(skb);
+ goto wq_ll_pop;
+ }
+
+ cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+ skb_put(skb, cqe_bcnt);
+
+ mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+
+wq_ll_pop:
+ mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
+ &wqe->next.next_wqe_index);
+}
+
+static inline void mlx5e_mpwqe_fill_rx_skb(struct mlx5e_rq *rq,
+ struct mlx5_cqe64 *cqe,
+ struct mlx5e_mpw_info *wi,
+ u32 cqe_bcnt,
+ struct sk_buff *skb)
+{
+ u32 consumed_bytes = ALIGN(cqe_bcnt, rq->mpwqe_stride_sz);
+ u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
+ u32 wqe_offset = stride_ix * rq->mpwqe_stride_sz;
+ u32 head_offset = wqe_offset & (PAGE_SIZE - 1);
+ u32 page_idx = wqe_offset >> PAGE_SHIFT;
+ u32 head_page_idx = page_idx;
+ u16 headlen = min_t(u16, MLX5_MPWRQ_SMALL_PACKET_THRESHOLD, cqe_bcnt);
+ u32 frag_offset = head_offset + headlen;
+ u16 byte_cnt = cqe_bcnt - headlen;
+
+ if (unlikely(frag_offset >= PAGE_SIZE)) {
+ page_idx++;
+ frag_offset -= PAGE_SIZE;
+ }
+ wi->dma_pre_sync(rq->pdev, wi, wqe_offset, consumed_bytes);
+
+ while (byte_cnt) {
+ u32 pg_consumed_bytes =
+ min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
+
+ wi->add_skb_frag(rq, skb, wi, page_idx, frag_offset,
+ pg_consumed_bytes);
+ byte_cnt -= pg_consumed_bytes;
+ frag_offset = 0;
+ page_idx++;
+ }
+ /* copy header */
+ wi->copy_skb_header(rq->pdev, skb, wi, head_page_idx, head_offset,
+ headlen);
+ /* skb linear part was allocated with headlen and aligned to long */
+ skb->tail += headlen;
+ skb->len += headlen;
+}
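
/*
 * Worked example (standalone) of the stride arithmetic above, with assumed
 * sizes: 4K pages, 2K strides, a 3000-byte packet starting at stride 5, and
 * an assumed 128-byte header threshold. It prints each page fragment the
 * driver would attach after copying the header into the linear part.
 */
#include <stdio.h>

#define PAGE_SIZE_X	4096u
#define STRIDE_SZ	2048u
#define HDR_MAX		128u

int main(void)
{
	unsigned int stride_ix = 5, cqe_bcnt = 3000;
	unsigned int wqe_offset = stride_ix * STRIDE_SZ;	/* 10240 */
	unsigned int head_offset = wqe_offset % PAGE_SIZE_X;	/* 2048 */
	unsigned int page_idx = wqe_offset / PAGE_SIZE_X;	/* 2 */
	unsigned int headlen = cqe_bcnt < HDR_MAX ? cqe_bcnt : HDR_MAX;
	unsigned int frag_offset = head_offset + headlen;
	unsigned int byte_cnt = cqe_bcnt - headlen;

	if (frag_offset >= PAGE_SIZE_X) {	/* header ended the page */
		page_idx++;
		frag_offset -= PAGE_SIZE_X;
	}
	while (byte_cnt) {	/* split the payload across page frags */
		unsigned int chunk = PAGE_SIZE_X - frag_offset;

		if (chunk > byte_cnt)
			chunk = byte_cnt;
		printf("frag: page %u offset %u len %u\n",
		       page_idx, frag_offset, chunk);
		byte_cnt -= chunk;
		frag_offset = 0;
		page_idx++;
	}
	return 0;
}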
+
+void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+ u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
+ u16 wqe_id = be16_to_cpu(cqe->wqe_id);
+ struct mlx5e_mpw_info *wi = &rq->wqe_info[wqe_id];
+ struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_id);
+ struct sk_buff *skb;
+ u16 cqe_bcnt;
+
+ wi->consumed_strides += cstrides;
+
+ if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+ rq->stats.wqe_err++;
+ goto mpwrq_cqe_out;
+ }
+
+ if (unlikely(mpwrq_is_filler_cqe(cqe))) {
+ rq->stats.mpwqe_filler++;
+ goto mpwrq_cqe_out;
+ }
+
+ skb = napi_alloc_skb(rq->cq.napi,
+ ALIGN(MLX5_MPWRQ_SMALL_PACKET_THRESHOLD,
+ sizeof(long)));
+ if (unlikely(!skb)) {
+ rq->stats.buff_alloc_err++;
+ goto mpwrq_cqe_out;
+ }
+
+ prefetch(skb->data);
+ cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
+
+ mlx5e_mpwqe_fill_rx_skb(rq, cqe, wi, cqe_bcnt, skb);
+ mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+
+mpwrq_cqe_out:
+ if (likely(wi->consumed_strides < rq->mpwqe_num_strides))
+ return;
+
+ wi->free_wqe(rq, wi);
+ mlx5_wq_ll_pop(&rq->wq, cqe->wqe_id, &wqe->next.next_wqe_index);
}
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
{
struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
- int work_done;
+ int work_done = 0;
- for (work_done = 0; work_done < budget; work_done++) {
- struct mlx5e_rx_wqe *wqe;
- struct mlx5_cqe64 *cqe;
- struct sk_buff *skb;
- __be16 wqe_counter_be;
- u16 wqe_counter;
+ if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state)))
+ return 0;
- cqe = mlx5e_get_cqe(cq);
- if (!cqe)
- break;
+ if (cq->decmprs_left)
+ work_done += mlx5e_decompress_cqes_cont(rq, cq, 0, budget);
- mlx5_cqwq_pop(&cq->wq);
-
- wqe_counter_be = cqe->wqe_counter;
- wqe_counter = be16_to_cpu(wqe_counter_be);
- wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
- skb = rq->skb[wqe_counter];
- prefetch(skb->data);
- rq->skb[wqe_counter] = NULL;
+ for (; work_done < budget; work_done++) {
+ struct mlx5_cqe64 *cqe = mlx5e_get_cqe(cq);
- dma_unmap_single(rq->pdev,
- *((dma_addr_t *)skb->cb),
- rq->wqe_sz,
- DMA_FROM_DEVICE);
+ if (!cqe)
+ break;
- if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
- rq->stats.wqe_err++;
- dev_kfree_skb(skb);
- goto wq_ll_pop;
+ if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
+ work_done +=
+ mlx5e_decompress_cqes_start(rq, cq,
+ budget - work_done);
+ continue;
}
- mlx5e_build_rx_skb(cqe, rq, skb);
- rq->stats.packets++;
- rq->stats.bytes += be32_to_cpu(cqe->byte_cnt);
- napi_gro_receive(cq->napi, skb);
+ mlx5_cqwq_pop(&cq->wq);
-wq_ll_pop:
- mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
- &wqe->next.next_wqe_index);
+ rq->handle_rx_cqe(rq, cqe);
}
mlx5_cqwq_update_db_record(&cq->wq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
new file mode 100644
index 000000000000..1fffe48a93cc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
@@ -0,0 +1,335 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "en.h"
+
+/* Adaptive moderation profiles */
+#define MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256
+#define MLX5E_RX_AM_DEF_PROFILE_CQE 1
+#define MLX5E_RX_AM_DEF_PROFILE_EQE 1
+#define MLX5E_PARAMS_AM_NUM_PROFILES 5
+
+/* All profile sizes must be MLX5E_PARAMS_AM_NUM_PROFILES */
+#define MLX5_AM_EQE_PROFILES { \
+ {1, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {8, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {64, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {128, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+ {256, MLX5E_AM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
+}
+
+#define MLX5_AM_CQE_PROFILES { \
+ {2, 256}, \
+ {8, 128}, \
+ {16, 64}, \
+ {32, 64}, \
+ {64, 64} \
+}
+
+static const struct mlx5e_cq_moder
+profile[MLX5_CQ_PERIOD_NUM_MODES][MLX5E_PARAMS_AM_NUM_PROFILES] = {
+ MLX5_AM_EQE_PROFILES,
+ MLX5_AM_CQE_PROFILES,
+};
+
+static inline struct mlx5e_cq_moder mlx5e_am_get_profile(u8 cq_period_mode, int ix)
+{
+ return profile[cq_period_mode][ix];
+}
+
+struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode)
+{
+ int default_profile_ix;
+
+ if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
+ default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_CQE;
+ else /* MLX5_CQ_PERIOD_MODE_START_FROM_EQE */
+ default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_EQE;
+
+ return profile[rx_cq_period_mode][default_profile_ix];
+}
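
/*
 * Usage sketch for the profile tables above (standalone model of
 * mlx5e_cq_moder, assuming EQE mode is row 0 as in the table ordering):
 * with the values in this file, the default CQE-mode profile (index 1)
 * resolves to {8 usec, 128 pkts} and the default EQE-mode profile
 * (index 1) to {8 usec, 256 pkts}.
 */
#include <stdio.h>

struct moder { int usec; int pkts; };

static const struct moder prof[2][5] = {
	/* EQE */ { {1, 256}, {8, 256}, {64, 256}, {128, 256}, {256, 256} },
	/* CQE */ { {2, 256}, {8, 128}, {16, 64}, {32, 64}, {64, 64} },
};

int main(void)
{
	struct moder def_cqe = prof[1][1];	/* {8, 128} */
	struct moder def_eqe = prof[0][1];	/* {8, 256} */

	printf("cqe: %d usec / %d pkts\n", def_cqe.usec, def_cqe.pkts);
	printf("eqe: %d usec / %d pkts\n", def_eqe.usec, def_eqe.pkts);
	return 0;
}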
+
+/* Adaptive moderation logic */
+enum {
+ MLX5E_AM_START_MEASURE,
+ MLX5E_AM_MEASURE_IN_PROGRESS,
+ MLX5E_AM_APPLY_NEW_PROFILE,
+};
+
+enum {
+ MLX5E_AM_PARKING_ON_TOP,
+ MLX5E_AM_PARKING_TIRED,
+ MLX5E_AM_GOING_RIGHT,
+ MLX5E_AM_GOING_LEFT,
+};
+
+enum {
+ MLX5E_AM_STATS_WORSE,
+ MLX5E_AM_STATS_SAME,
+ MLX5E_AM_STATS_BETTER,
+};
+
+enum {
+ MLX5E_AM_STEPPED,
+ MLX5E_AM_TOO_TIRED,
+ MLX5E_AM_ON_EDGE,
+};
+
+static bool mlx5e_am_on_top(struct mlx5e_rx_am *am)
+{
+ switch (am->tune_state) {
+ case MLX5E_AM_PARKING_ON_TOP:
+ case MLX5E_AM_PARKING_TIRED:
+ WARN_ONCE(true, "mlx5e_am_on_top: PARKING\n");
+ return true;
+ case MLX5E_AM_GOING_RIGHT:
+ return (am->steps_left > 1) && (am->steps_right == 1);
+ default: /* MLX5E_AM_GOING_LEFT */
+ return (am->steps_right > 1) && (am->steps_left == 1);
+ }
+}
+
+static void mlx5e_am_turn(struct mlx5e_rx_am *am)
+{
+ switch (am->tune_state) {
+ case MLX5E_AM_PARKING_ON_TOP:
+ case MLX5E_AM_PARKING_TIRED:
+ WARN_ONCE(true, "mlx5e_am_turn: PARKING\n");
+ break;
+ case MLX5E_AM_GOING_RIGHT:
+ am->tune_state = MLX5E_AM_GOING_LEFT;
+ am->steps_left = 0;
+ break;
+ case MLX5E_AM_GOING_LEFT:
+ am->tune_state = MLX5E_AM_GOING_RIGHT;
+ am->steps_right = 0;
+ break;
+ }
+}
+
+static int mlx5e_am_step(struct mlx5e_rx_am *am)
+{
+ if (am->tired == (MLX5E_PARAMS_AM_NUM_PROFILES * 2))
+ return MLX5E_AM_TOO_TIRED;
+
+ switch (am->tune_state) {
+ case MLX5E_AM_PARKING_ON_TOP:
+ case MLX5E_AM_PARKING_TIRED:
+ WARN_ONCE(true, "mlx5e_am_step: PARKING\n");
+ break;
+ case MLX5E_AM_GOING_RIGHT:
+ if (am->profile_ix == (MLX5E_PARAMS_AM_NUM_PROFILES - 1))
+ return MLX5E_AM_ON_EDGE;
+ am->profile_ix++;
+ am->steps_right++;
+ break;
+ case MLX5E_AM_GOING_LEFT:
+ if (am->profile_ix == 0)
+ return MLX5E_AM_ON_EDGE;
+ am->profile_ix--;
+ am->steps_left++;
+ break;
+ }
+
+ am->tired++;
+ return MLX5E_AM_STEPPED;
+}
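
/*
 * Standalone trace of the stepping logic above: starting from an assumed
 * profile index 2 and moving right, the index walks 2 -> 3 -> 4 and then
 * hits the table edge (NUM_PROFILES - 1 == 4), which is the ON_EDGE case
 * that parks the tuner on top.
 */
#include <stdio.h>

#define NUM_PROFILES 5

int main(void)
{
	int ix = 2;

	while (ix < NUM_PROFILES - 1) {
		ix++;			/* one "step right" per decision */
		printf("stepped right to profile %d\n", ix);
	}
	printf("on edge at profile %d -> park on top\n", ix);
	return 0;
}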
+
+static void mlx5e_am_park_on_top(struct mlx5e_rx_am *am)
+{
+ am->steps_right = 0;
+ am->steps_left = 0;
+ am->tired = 0;
+ am->tune_state = MLX5E_AM_PARKING_ON_TOP;
+}
+
+static void mlx5e_am_park_tired(struct mlx5e_rx_am *am)
+{
+ am->steps_right = 0;
+ am->steps_left = 0;
+ am->tune_state = MLX5E_AM_PARKING_TIRED;
+}
+
+static void mlx5e_am_exit_parking(struct mlx5e_rx_am *am)
+{
+ am->tune_state = am->profile_ix ? MLX5E_AM_GOING_LEFT :
+ MLX5E_AM_GOING_RIGHT;
+ mlx5e_am_step(am);
+}
+
+static int mlx5e_am_stats_compare(struct mlx5e_rx_am_stats *curr,
+ struct mlx5e_rx_am_stats *prev)
+{
+ int diff;
+
+ if (!prev->ppms)
+ return curr->ppms ? MLX5E_AM_STATS_BETTER :
+ MLX5E_AM_STATS_SAME;
+
+ diff = curr->ppms - prev->ppms;
+ if (((100 * abs(diff)) / prev->ppms) > 10) /* more than 10% diff */
+ return (diff > 0) ? MLX5E_AM_STATS_BETTER :
+ MLX5E_AM_STATS_WORSE;
+
+ if (!prev->epms)
+ return curr->epms ? MLX5E_AM_STATS_WORSE :
+ MLX5E_AM_STATS_SAME;
+
+ diff = curr->epms - prev->epms;
+ if (((100 * abs(diff)) / prev->epms) > 10) /* more than 10% diff */
+ return (diff < 0) ? MLX5E_AM_STATS_BETTER :
+ MLX5E_AM_STATS_WORSE;
+
+ return MLX5E_AM_STATS_SAME;
+}
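
/*
 * Worked example (made-up numbers) of the +/-10% comparison above: moving
 * from prev ppms = 1000 to curr ppms = 1120 gives 100*|120|/1000 = 12 > 10,
 * so the sample counts as BETTER; 1000 -> 950 is only a 5% change and is
 * SAME throughput-wise, letting the event-rate (epms) check break the tie.
 */
#include <stdio.h>
#include <stdlib.h>

static const char *cmp(int curr, int prev)
{
	int diff = curr - prev;

	if ((100 * abs(diff)) / prev > 10)	/* more than 10% diff */
		return diff > 0 ? "BETTER" : "WORSE";
	return "SAME";
}

int main(void)
{
	printf("1000 -> 1120: %s\n", cmp(1120, 1000));	/* BETTER */
	printf("1000 ->  950: %s\n", cmp(950, 1000));	/* SAME */
	return 0;
}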
+
+static bool mlx5e_am_decision(struct mlx5e_rx_am_stats *curr_stats,
+ struct mlx5e_rx_am *am)
+{
+ int prev_state = am->tune_state;
+ int prev_ix = am->profile_ix;
+ int stats_res;
+ int step_res;
+
+ switch (am->tune_state) {
+ case MLX5E_AM_PARKING_ON_TOP:
+ stats_res = mlx5e_am_stats_compare(curr_stats, &am->prev_stats);
+ if (stats_res != MLX5E_AM_STATS_SAME)
+ mlx5e_am_exit_parking(am);
+ break;
+
+ case MLX5E_AM_PARKING_TIRED:
+ am->tired--;
+ if (!am->tired)
+ mlx5e_am_exit_parking(am);
+ break;
+
+ case MLX5E_AM_GOING_RIGHT:
+ case MLX5E_AM_GOING_LEFT:
+ stats_res = mlx5e_am_stats_compare(curr_stats, &am->prev_stats);
+ if (stats_res != MLX5E_AM_STATS_BETTER)
+ mlx5e_am_turn(am);
+
+ if (mlx5e_am_on_top(am)) {
+ mlx5e_am_park_on_top(am);
+ break;
+ }
+
+ step_res = mlx5e_am_step(am);
+ switch (step_res) {
+ case MLX5E_AM_ON_EDGE:
+ mlx5e_am_park_on_top(am);
+ break;
+ case MLX5E_AM_TOO_TIRED:
+ mlx5e_am_park_tired(am);
+ break;
+ }
+
+ break;
+ }
+
+ if ((prev_state != MLX5E_AM_PARKING_ON_TOP) ||
+ (am->tune_state != MLX5E_AM_PARKING_ON_TOP))
+ am->prev_stats = *curr_stats;
+
+ return am->profile_ix != prev_ix;
+}
+
+static void mlx5e_am_sample(struct mlx5e_rq *rq,
+ struct mlx5e_rx_am_sample *s)
+{
+ s->time = ktime_get();
+ s->pkt_ctr = rq->stats.packets;
+ s->event_ctr = rq->cq.event_ctr;
+}
+
+#define MLX5E_AM_NEVENTS 64
+
+static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start,
+ struct mlx5e_rx_am_sample *end,
+ struct mlx5e_rx_am_stats *curr_stats)
+{
+ /* u32 holds up to 71 minutes, should be enough */
+ u32 delta_us = ktime_us_delta(end->time, start->time);
+ unsigned int npkts = end->pkt_ctr - start->pkt_ctr;
+
+ if (!delta_us) {
+ WARN_ONCE(true, "mlx5e_am_calc_stats: delta_us=0\n");
+ return;
+ }
+
+ curr_stats->ppms = (npkts * USEC_PER_MSEC) / delta_us;
+ curr_stats->epms = (MLX5E_AM_NEVENTS * USEC_PER_MSEC) / delta_us;
+}
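
/*
 * Worked example for the rate computation above, with assumed counts: 64
 * interrupt events and 6400 packets observed over delta_us = 640 (i.e. a
 * 100k events/sec workload); USEC_PER_MSEC is 1000. Integer division is
 * intentional, matching the driver's arithmetic.
 */
#include <stdio.h>

int main(void)
{
	unsigned int delta_us = 640, npkts = 6400, nevents = 64;
	unsigned int ppms = npkts * 1000 / delta_us;	/* 10000 pkts/ms */
	unsigned int epms = nevents * 1000 / delta_us;	/* 100 events/ms */

	printf("ppms=%u epms=%u\n", ppms, epms);
	return 0;
}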
+
+void mlx5e_rx_am_work(struct work_struct *work)
+{
+ struct mlx5e_rx_am *am = container_of(work, struct mlx5e_rx_am,
+ work);
+ struct mlx5e_rq *rq = container_of(am, struct mlx5e_rq, am);
+ struct mlx5e_cq_moder cur_profile = profile[am->mode][am->profile_ix];
+
+ mlx5_core_modify_cq_moderation(rq->priv->mdev, &rq->cq.mcq,
+ cur_profile.usec, cur_profile.pkts);
+
+ am->state = MLX5E_AM_START_MEASURE;
+}
+
+void mlx5e_rx_am(struct mlx5e_rq *rq)
+{
+ struct mlx5e_rx_am *am = &rq->am;
+ struct mlx5e_rx_am_sample end_sample;
+ struct mlx5e_rx_am_stats curr_stats;
+ u16 nevents;
+
+ switch (am->state) {
+ case MLX5E_AM_MEASURE_IN_PROGRESS:
+ nevents = rq->cq.event_ctr - am->start_sample.event_ctr;
+ if (nevents < MLX5E_AM_NEVENTS)
+ break;
+ mlx5e_am_sample(rq, &end_sample);
+ mlx5e_am_calc_stats(&am->start_sample, &end_sample,
+ &curr_stats);
+ if (mlx5e_am_decision(&curr_stats, am)) {
+ am->state = MLX5E_AM_APPLY_NEW_PROFILE;
+ schedule_work(&am->work);
+ break;
+ }
+ /* fall through */
+ case MLX5E_AM_START_MEASURE:
+ mlx5e_am_sample(rq, &am->start_sample);
+ am->state = MLX5E_AM_MEASURE_IN_PROGRESS;
+ break;
+ case MLX5E_AM_APPLY_NEW_PROFILE:
+ break;
+ }
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
new file mode 100644
index 000000000000..499487ce3b53
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -0,0 +1,358 @@
+/*
+ * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+#ifndef __MLX5_EN_STATS_H__
+#define __MLX5_EN_STATS_H__
+
+#define MLX5E_READ_CTR64_CPU(ptr, dsc, i) \
+ (*(u64 *)((char *)ptr + dsc[i].offset))
+#define MLX5E_READ_CTR64_BE(ptr, dsc, i) \
+ be64_to_cpu(*(__be64 *)((char *)ptr + dsc[i].offset))
+#define MLX5E_READ_CTR32_CPU(ptr, dsc, i) \
+ (*(u32 *)((char *)ptr + dsc[i].offset))
+#define MLX5E_READ_CTR32_BE(ptr, dsc, i) \
+ be32_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
+
+#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
+#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld)
+
+struct counter_desc {
+ char format[ETH_GSTRING_LEN];
+ int offset; /* Byte offset */
+};
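
/*
 * Usage sketch for counter_desc: the DECLARE macros pair an ethtool string
 * with a byte offset, and the READ_CTR macros turn that offset back into a
 * value at dump time. Standalone model with a two-field struct; the real
 * descriptor stores the name in a fixed ETH_GSTRING_LEN array rather than
 * a pointer.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct stats { uint64_t rx_packets; uint64_t rx_bytes; };

struct desc { const char *name; size_t offset; };

#define DECLARE_STAT(type, fld) { #fld, offsetof(type, fld) }

static const struct desc table[] = {
	DECLARE_STAT(struct stats, rx_packets),
	DECLARE_STAT(struct stats, rx_bytes),
};

int main(void)
{
	struct stats s = { .rx_packets = 7, .rx_bytes = 1024 };

	for (int i = 0; i < 2; i++)	/* MLX5E_READ_CTR64_CPU analogue */
		printf("%s = %llu\n", table[i].name,
		       (unsigned long long)
		       *(uint64_t *)((char *)&s + table[i].offset));
	return 0;
}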
+
+struct mlx5e_sw_stats {
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 tx_tso_packets;
+ u64 tx_tso_bytes;
+ u64 tx_tso_inner_packets;
+ u64 tx_tso_inner_bytes;
+ u64 rx_lro_packets;
+ u64 rx_lro_bytes;
+ u64 rx_csum_unnecessary;
+ u64 rx_csum_none;
+ u64 rx_csum_complete;
+ u64 rx_csum_unnecessary_inner;
+ u64 tx_csum_partial;
+ u64 tx_csum_partial_inner;
+ u64 tx_queue_stopped;
+ u64 tx_queue_wake;
+ u64 tx_queue_dropped;
+ u64 tx_xmit_more;
+ u64 rx_wqe_err;
+ u64 rx_mpwqe_filler;
+ u64 rx_mpwqe_frag;
+ u64 rx_buff_alloc_err;
+ u64 rx_cqe_compress_blks;
+ u64 rx_cqe_compress_pkts;
+
+ /* Special handling counters */
+ u64 link_down_events_phy;
+};
+
+static const struct counter_desc sw_stats_desc[] = {
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_frag) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) },
+};
+
+struct mlx5e_qcounter_stats {
+ u32 rx_out_of_buffer;
+};
+
+static const struct counter_desc q_stats_desc[] = {
+ { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
+};
+
+#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
+#define VPORT_COUNTER_GET(vstats, c) MLX5_GET64(query_vport_counter_out, \
+ vstats->query_vport_out, c)
+
+struct mlx5e_vport_stats {
+ __be64 query_vport_out[MLX5_ST_SZ_QW(query_vport_counter_out)];
+};
+
+static const struct counter_desc vport_stats_desc[] = {
+ { "rx_vport_unicast_packets",
+ VPORT_COUNTER_OFF(received_eth_unicast.packets) },
+ { "rx_vport_unicast_bytes",
+ VPORT_COUNTER_OFF(received_eth_unicast.octets) },
+ { "tx_vport_unicast_packets",
+ VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
+ { "tx_vport_unicast_bytes",
+ VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
+ { "rx_vport_multicast_packets",
+ VPORT_COUNTER_OFF(received_eth_multicast.packets) },
+ { "rx_vport_multicast_bytes",
+ VPORT_COUNTER_OFF(received_eth_multicast.octets) },
+ { "tx_vport_multicast_packets",
+ VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
+ { "tx_vport_multicast_bytes",
+ VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
+ { "rx_vport_broadcast_packets",
+ VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
+ { "rx_vport_broadcast_bytes",
+ VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
+ { "tx_vport_broadcast_packets",
+ VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
+ { "tx_vport_broadcast_bytes",
+ VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
+ { "rx_vport_rdma_unicast_packets",
+ VPORT_COUNTER_OFF(received_ib_unicast.packets) },
+ { "rx_vport_rdma_unicast_bytes",
+ VPORT_COUNTER_OFF(received_ib_unicast.octets) },
+ { "tx_vport_rdma_unicast_packets",
+ VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
+ { "tx_vport_rdma_unicast_bytes",
+ VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
+ { "rx_vport_rdma_multicast_packets",
+ VPORT_COUNTER_OFF(received_ib_multicast.packets) },
+ { "rx_vport_rdma_multicast_bytes",
+ VPORT_COUNTER_OFF(received_ib_multicast.octets) },
+ { "tx_vport_rdma_multicast_packets",
+ VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
+ { "tx_vport_rdma_multicast_bytes",
+ VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
+};
+
+#define PPORT_802_3_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
+#define PPORT_802_3_GET(pstats, c) \
+ MLX5_GET64(ppcnt_reg, pstats->IEEE_802_3_counters, \
+ counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
+#define PPORT_2863_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
+#define PPORT_2863_GET(pstats, c) \
+ MLX5_GET64(ppcnt_reg, pstats->RFC_2863_counters, \
+ counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
+#define PPORT_2819_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
+#define PPORT_2819_GET(pstats, c) \
+ MLX5_GET64(ppcnt_reg, pstats->RFC_2819_counters, \
+ counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
+#define PPORT_PER_PRIO_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.eth_per_prio_grp_data_layout.c##_high)
+#define PPORT_PER_PRIO_GET(pstats, prio, c) \
+ MLX5_GET64(ppcnt_reg, pstats->per_prio_counters[prio], \
+ counter_set.eth_per_prio_grp_data_layout.c##_high)
+#define NUM_PPORT_PRIO 8
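
/*
 * Sketch of the "##_high" trick used above: each PPCNT counter is laid out
 * as adjacent _high/_low 32-bit halves, so pasting "_high" onto the counter
 * name yields the byte offset where a 64-bit big-endian read of the full
 * counter starts. Field names below are stand-ins, not the real layout.
 */
#include <stddef.h>
#include <stdint.h>

struct layout {
	uint32_t frames_ok_high;
	uint32_t frames_ok_low;
	uint32_t octets_ok_high;
	uint32_t octets_ok_low;
};

#define CTR_OFF(c) offsetof(struct layout, c##_high)

static const size_t frames_off = CTR_OFF(frames_ok);	/* == 0 */
static const size_t octets_off = CTR_OFF(octets_ok);	/* == 8 */

int main(void)
{
	return !(frames_off == 0 && octets_off == 8);
}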
+
+struct mlx5e_pport_stats {
+ __be64 IEEE_802_3_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
+ __be64 RFC_2863_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
+ __be64 RFC_2819_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
+ __be64 per_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
+ __be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
+};
+
+static const struct counter_desc pport_802_3_stats_desc[] = {
+ { "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
+ { "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
+ { "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
+ { "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
+ { "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
+ { "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
+ { "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
+ { "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
+ { "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
+ { "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
+ { "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
+ { "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
+ { "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
+ { "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
+ { "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
+ { "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
+ { "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
+ { "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
+};
+
+static const struct counter_desc pport_2863_stats_desc[] = {
+ { "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
+ { "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
+ { "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
+};
+
+static const struct counter_desc pport_2819_stats_desc[] = {
+ { "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
+ { "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
+ { "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
+ { "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
+ { "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
+ { "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
+ { "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
+ { "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
+ { "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
+ { "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
+ { "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
+ { "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
+ { "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
+};
+
+static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
+ { "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
+ { "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
+ { "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
+ { "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
+};
+
+static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
+ /* %s is "global" or "prio{i}" */
+ { "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
+ { "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
+ { "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
+ { "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
+ { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
+};
+
+struct mlx5e_rq_stats {
+ u64 packets;
+ u64 bytes;
+ u64 csum_complete;
+ u64 csum_unnecessary_inner;
+ u64 csum_none;
+ u64 lro_packets;
+ u64 lro_bytes;
+ u64 wqe_err;
+ u64 mpwqe_filler;
+ u64 mpwqe_frag;
+ u64 buff_alloc_err;
+ u64 cqe_compress_blks;
+ u64 cqe_compress_pkts;
+};
+
+static const struct counter_desc rq_stats_desc[] = {
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_frag) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
+};
+
+struct mlx5e_sq_stats {
+ /* commonly accessed in data path */
+ u64 packets;
+ u64 bytes;
+ u64 xmit_more;
+ u64 tso_packets;
+ u64 tso_bytes;
+ u64 tso_inner_packets;
+ u64 tso_inner_bytes;
+ u64 csum_partial_inner;
+ u64 nop;
+ /* less likely accessed in data path */
+ u64 csum_none;
+ u64 stopped;
+ u64 wake;
+ u64 dropped;
+};
+
+static const struct counter_desc sq_stats_desc[] = {
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
+};
+
+#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc)
+#define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc)
+#define NUM_VPORT_COUNTERS ARRAY_SIZE(vport_stats_desc)
+#define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc)
+#define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc)
+#define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc)
+#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \
+ ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
+#define NUM_PPORT_PER_PRIO_PFC_COUNTERS \
+ ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
+#define NUM_PPORT_COUNTERS (NUM_PPORT_802_3_COUNTERS + \
+ NUM_PPORT_2863_COUNTERS + \
+ NUM_PPORT_2819_COUNTERS + \
+ NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \
+ NUM_PPORT_PRIO)
+#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
+#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
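
/*
 * Count check for NUM_PPORT_COUNTERS as defined above: 18 IEEE 802.3
 * entries + 3 RFC 2863 entries + 13 RFC 2819 entries + 4 per-prio traffic
 * entries * 8 priorities = 18 + 3 + 13 + 32 = 66 strings per port. The
 * per-prio PFC descriptors are not included in this total.
 */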
+
+struct mlx5e_stats {
+ struct mlx5e_sw_stats sw;
+ struct mlx5e_qcounter_stats qcnt;
+ struct mlx5e_vport_stats vport;
+ struct mlx5e_pport_stats pport;
+};
+
+#endif /* __MLX5_EN_STATS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index b3de09f13425..22cfc4ac1837 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -37,8 +37,11 @@
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
+#include <net/switchdev.h>
+#include <net/tc_act/tc_mirred.h>
#include "en.h"
#include "en_tc.h"
+#include "eswitch.h"
struct mlx5e_tc_flow {
struct rhash_head node;
@@ -46,64 +49,109 @@ struct mlx5e_tc_flow {
struct mlx5_flow_rule *rule;
};
-#define MLX5E_TC_FLOW_TABLE_NUM_ENTRIES 1024
-#define MLX5E_TC_FLOW_TABLE_NUM_GROUPS 4
+#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
+#define MLX5E_TC_TABLE_NUM_GROUPS 4
-static struct mlx5_flow_rule *mlx5e_tc_add_flow(struct mlx5e_priv *priv,
- u32 *match_c, u32 *match_v,
- u32 action, u32 flow_tag)
+static struct mlx5_flow_rule *mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ u32 action, u32 flow_tag)
{
- struct mlx5_flow_destination dest = {
- .type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
- {.ft = priv->fts.vlan.t},
- };
+ struct mlx5_core_dev *dev = priv->mdev;
+ struct mlx5_flow_destination dest = { 0 };
+ struct mlx5_fc *counter = NULL;
struct mlx5_flow_rule *rule;
bool table_created = false;
- if (IS_ERR_OR_NULL(priv->fts.tc.t)) {
- priv->fts.tc.t =
- mlx5_create_auto_grouped_flow_table(priv->fts.ns, 0,
- MLX5E_TC_FLOW_TABLE_NUM_ENTRIES,
- MLX5E_TC_FLOW_TABLE_NUM_GROUPS);
- if (IS_ERR(priv->fts.tc.t)) {
+ if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = priv->fs.vlan.ft.t;
+ } else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ counter = mlx5_fc_create(dev, true);
+ if (IS_ERR(counter))
+ return ERR_CAST(counter);
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter = counter;
+ }
+
+ if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
+ priv->fs.tc.t =
+ mlx5_create_auto_grouped_flow_table(priv->fs.ns,
+ MLX5E_TC_PRIO,
+ MLX5E_TC_TABLE_NUM_ENTRIES,
+ MLX5E_TC_TABLE_NUM_GROUPS,
+ 0);
+ if (IS_ERR(priv->fs.tc.t)) {
netdev_err(priv->netdev,
"Failed to create tc offload table\n");
- return ERR_CAST(priv->fts.tc.t);
+ rule = ERR_CAST(priv->fs.tc.t);
+ goto err_create_ft;
}
table_created = true;
}
- rule = mlx5_add_flow_rule(priv->fts.tc.t, MLX5_MATCH_OUTER_HEADERS,
- match_c, match_v,
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ rule = mlx5_add_flow_rule(priv->fs.tc.t, spec,
action, flow_tag,
- action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST ? &dest : NULL);
+ &dest);
- if (IS_ERR(rule) && table_created) {
- mlx5_destroy_flow_table(priv->fts.tc.t);
- priv->fts.tc.t = NULL;
+ if (IS_ERR(rule))
+ goto err_add_rule;
+
+ return rule;
+
+err_add_rule:
+ if (table_created) {
+ mlx5_destroy_flow_table(priv->fs.tc.t);
+ priv->fs.tc.t = NULL;
}
+err_create_ft:
+ mlx5_fc_destroy(dev, counter);
return rule;
}
+static struct mlx5_flow_rule *mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
+ struct mlx5_flow_spec *spec,
+ u32 action, u32 dst_vport)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_eswitch_rep *rep = priv->ppriv;
+ u32 src_vport;
+
+ if (rep->vport) /* set source vport for the flow */
+ src_vport = rep->vport;
+ else
+ src_vport = FDB_UPLINK_VPORT;
+
+ return mlx5_eswitch_add_offloaded_rule(esw, spec, action, src_vport, dst_vport);
+}
+
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
struct mlx5_flow_rule *rule)
{
+ struct mlx5_fc *counter = NULL;
+
+ counter = mlx5_flow_rule_counter(rule);
+
mlx5_del_flow_rule(rule);
- if (!mlx5e_tc_num_filters(priv)) {
- mlx5_destroy_flow_table(priv->fts.tc.t);
- priv->fts.tc.t = NULL;
+ mlx5_fc_destroy(priv->mdev, counter);
+
+ if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
+ mlx5_destroy_flow_table(priv->fs.tc.t);
+ priv->fs.tc.t = NULL;
}
}
-static int parse_cls_flower(struct mlx5e_priv *priv,
- u32 *match_c, u32 *match_v,
+static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f)
{
- void *headers_c = MLX5_ADDR_OF(fte_match_param, match_c, outer_headers);
- void *headers_v = MLX5_ADDR_OF(fte_match_param, match_v, outer_headers);
+ void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers);
+ void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers);
u16 addr_type = 0;
u8 ip_proto = 0;
@@ -122,7 +170,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
struct flow_dissector_key_control *key =
skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
+ FLOW_DISSECTOR_KEY_CONTROL,
f->key);
addr_type = key->addr_type;
}
@@ -266,10 +314,11 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
return 0;
}
-static int parse_tc_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
- u32 *action, u32 *flow_tag)
+static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
+ u32 *action, u32 *flow_tag)
{
const struct tc_action *a;
+ LIST_HEAD(actions);
if (tc_no_actions(exts))
return -EINVAL;
@@ -277,13 +326,17 @@ static int parse_tc_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
*flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
*action = 0;
- tc_for_each_action(a, exts) {
+ tcf_exts_to_list(exts, &actions);
+ list_for_each_entry(a, &actions, list) {
/* Only support a single action per rule */
if (*action)
return -EINVAL;
if (is_tcf_gact_shot(a)) {
*action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
+ if (MLX5_CAP_FLOWTABLE(priv->mdev,
+ flow_table_properties_nic_receive.flow_counter))
+ *action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
continue;
}
@@ -307,17 +360,68 @@ static int parse_tc_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
return 0;
}
+static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
+ u32 *action, u32 *dest_vport)
+{
+ const struct tc_action *a;
+ LIST_HEAD(actions);
+
+ if (tc_no_actions(exts))
+ return -EINVAL;
+
+ *action = 0;
+
+ tcf_exts_to_list(exts, &actions);
+ list_for_each_entry(a, &actions, list) {
+ /* Only support a single action per rule */
+ if (*action)
+ return -EINVAL;
+
+ if (is_tcf_gact_shot(a)) {
+ *action = MLX5_FLOW_CONTEXT_ACTION_DROP |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ continue;
+ }
+
+ if (is_tcf_mirred_redirect(a)) {
+ int ifindex = tcf_mirred_ifindex(a);
+ struct net_device *out_dev;
+ struct mlx5e_priv *out_priv;
+ struct mlx5_eswitch_rep *out_rep;
+
+ out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
+
+ if (!switchdev_port_same_parent_id(priv->netdev, out_dev)) {
+ pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
+ priv->netdev->name, out_dev->name);
+ return -EINVAL;
+ }
+
+ out_priv = netdev_priv(out_dev);
+ out_rep = out_priv->ppriv;
+ if (out_rep->vport == 0)
+ *dest_vport = FDB_UPLINK_VPORT;
+ else
+ *dest_vport = out_rep->vport;
+ *action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ continue;
+ }
+
+ return -EINVAL;
+ }
+ return 0;
+}
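
/*
 * Example of the kind of classifier this parser is meant to accept
 * (hedged: exact tc syntax depends on the iproute2 version in use). One
 * action per rule, either a drop or a redirect:
 *
 *   tc qdisc add dev eth0 ingress
 *   tc filter add dev eth0 parent ffff: protocol ip flower \
 *           dst_mac 00:11:22:33:44:55 action drop
 *   tc filter add dev eth0 parent ffff: protocol ip flower \
 *           src_ip 10.0.0.1 action mirred egress redirect dev eth1
 *
 * In SRIOV offloads mode the redirect target must sit on the same
 * e-switch, which is what the switchdev parent-id check above enforces.
 */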
+
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
struct tc_cls_flower_offload *f)
{
- struct mlx5e_tc_flow_table *tc = &priv->fts.tc;
- u32 *match_c;
- u32 *match_v;
+ struct mlx5e_tc_table *tc = &priv->fs.tc;
int err = 0;
- u32 flow_tag;
- u32 action;
+ u32 flow_tag, action, dest_vport = 0;
struct mlx5e_tc_flow *flow;
+ struct mlx5_flow_spec *spec;
struct mlx5_flow_rule *old = NULL;
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
tc->ht_params);
@@ -326,49 +430,53 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
else
flow = kzalloc(sizeof(*flow), GFP_KERNEL);
- match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
- match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
- if (!match_c || !match_v || !flow) {
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec || !flow) {
err = -ENOMEM;
goto err_free;
}
flow->cookie = f->cookie;
- err = parse_cls_flower(priv, match_c, match_v, f);
+ err = parse_cls_flower(priv, spec, f);
if (err < 0)
goto err_free;
- err = parse_tc_actions(priv, f->exts, &action, &flow_tag);
- if (err < 0)
+ if (esw && esw->mode == SRIOV_OFFLOADS) {
+ err = parse_tc_fdb_actions(priv, f->exts, &action, &dest_vport);
+ if (err < 0)
+ goto err_free;
+ flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, action, dest_vport);
+ } else {
+ err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
+ if (err < 0)
+ goto err_free;
+ flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag);
+ }
+
+ if (IS_ERR(flow->rule)) {
+ err = PTR_ERR(flow->rule);
goto err_free;
+ }
err = rhashtable_insert_fast(&tc->ht, &flow->node,
tc->ht_params);
if (err)
- goto err_free;
-
- flow->rule = mlx5e_tc_add_flow(priv, match_c, match_v, action,
- flow_tag);
- if (IS_ERR(flow->rule)) {
- err = PTR_ERR(flow->rule);
- goto err_hash_del;
- }
+ goto err_del_rule;
if (old)
mlx5e_tc_del_flow(priv, old);
goto out;
-err_hash_del:
- rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);
+err_del_rule:
+ mlx5_del_flow_rule(flow->rule);
err_free:
if (!old)
kfree(flow);
out:
- kfree(match_c);
- kfree(match_v);
+ kvfree(spec);
return err;
}
@@ -376,7 +484,7 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f)
{
struct mlx5e_tc_flow *flow;
- struct mlx5e_tc_flow_table *tc = &priv->fts.tc;
+ struct mlx5e_tc_table *tc = &priv->fs.tc;
flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
tc->ht_params);
@@ -392,6 +500,36 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv,
return 0;
}
+int mlx5e_stats_flower(struct mlx5e_priv *priv,
+ struct tc_cls_flower_offload *f)
+{
+ struct mlx5e_tc_table *tc = &priv->fs.tc;
+ struct mlx5e_tc_flow *flow;
+ struct tc_action *a;
+ struct mlx5_fc *counter;
+ LIST_HEAD(actions);
+ u64 bytes;
+ u64 packets;
+ u64 lastuse;
+
+ flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
+ tc->ht_params);
+ if (!flow)
+ return -EINVAL;
+
+ counter = mlx5_flow_rule_counter(flow->rule);
+ if (!counter)
+ return 0;
+
+ mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
+
+ tcf_exts_to_list(f->exts, &actions);
+ list_for_each_entry(a, &actions, list)
+ tcf_action_stats_update(a, bytes, packets, lastuse);
+
+ return 0;
+}
+
static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
.head_offset = offsetof(struct mlx5e_tc_flow, node),
.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
@@ -401,7 +539,7 @@ static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
int mlx5e_tc_init(struct mlx5e_priv *priv)
{
- struct mlx5e_tc_flow_table *tc = &priv->fts.tc;
+ struct mlx5e_tc_table *tc = &priv->fs.tc;
tc->ht_params = mlx5e_tc_flow_ht_params;
return rhashtable_init(&tc->ht, &tc->ht_params);
@@ -418,12 +556,12 @@ static void _mlx5e_tc_del_flow(void *ptr, void *arg)
void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
- struct mlx5e_tc_flow_table *tc = &priv->fts.tc;
+ struct mlx5e_tc_table *tc = &priv->fs.tc;
rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);
- if (!IS_ERR_OR_NULL(priv->fts.tc.t)) {
- mlx5_destroy_flow_table(priv->fts.tc.t);
- priv->fts.tc.t = NULL;
+ if (!IS_ERR_OR_NULL(tc->t)) {
+ mlx5_destroy_flow_table(tc->t);
+ tc->t = NULL;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index d677428dc10f..34bf903fc886 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -43,9 +43,12 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
int mlx5e_delete_flower(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *f);
+int mlx5e_stats_flower(struct mlx5e_priv *priv,
+ struct tc_cls_flower_offload *f);
+
static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv)
{
- return atomic_read(&priv->fts.tc.ht.nelems);
+ return atomic_read(&priv->fs.tc.ht.nelems);
}
#endif /* __MLX5_EN_TC_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 1ffc7cb6f78c..eb0e72537f10 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -54,10 +54,11 @@ void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
sq->skb[pi] = NULL;
sq->pc++;
+ sq->stats.nop++;
if (notify_hw) {
cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
- mlx5e_tx_notify_hw(sq, wqe, 0);
+ mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
}
}
@@ -109,12 +110,68 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
{
struct mlx5e_priv *priv = netdev_priv(dev);
int channel_ix = fallback(dev, skb);
- int up = (netdev_get_num_tc(dev) && skb_vlan_tag_present(skb)) ?
- skb->vlan_tci >> VLAN_PRIO_SHIFT : 0;
+ int up = 0;
+
+ if (!netdev_get_num_tc(dev))
+ return channel_ix;
+
+ if (skb_vlan_tag_present(skb))
+ up = skb->vlan_tci >> VLAN_PRIO_SHIFT;
+
+ /* channel_ix can be larger than num_channels since
+ * dev->real_num_tx_queues = num_channels * num_tc
+ */
+ if (channel_ix >= priv->params.num_channels)
+ channel_ix = reciprocal_scale(channel_ix,
+ priv->params.num_channels);
return priv->channeltc_to_txq_map[channel_ix][up];
}
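
/*
 * Standalone sketch of reciprocal_scale() as used above: it maps a u32
 * onto [0, n) with a multiply and shift instead of a modulo. The mapping
 * is uniform when the input spans the whole u32 range; small inputs all
 * land in bucket 0.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t rscale(uint32_t val, uint32_t n)
{
	return (uint32_t)(((uint64_t)val * n) >> 32);
}

int main(void)
{
	printf("%u\n", rscale(0x20000000u, 4));	/* 0.125 of range -> 0 */
	printf("%u\n", rscale(0x80000000u, 4));	/* 0.5   of range -> 2 */
	printf("%u\n", rscale(0xC0000000u, 4));	/* 0.75  of range -> 3 */
	return 0;
}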
+static inline int mlx5e_skb_l2_header_offset(struct sk_buff *skb)
+{
+#define MLX5E_MIN_INLINE (ETH_HLEN + VLAN_HLEN)
+
+ return max(skb_network_offset(skb), MLX5E_MIN_INLINE);
+}
+
+static inline int mlx5e_skb_l3_header_offset(struct sk_buff *skb)
+{
+ struct flow_keys keys;
+
+ if (skb_transport_header_was_set(skb))
+ return skb_transport_offset(skb);
+ else if (skb_flow_dissect_flow_keys(skb, &keys, 0))
+ return keys.control.thoff;
+ else
+ return mlx5e_skb_l2_header_offset(skb);
+}
+
+static inline unsigned int mlx5e_calc_min_inline(enum mlx5_inline_modes mode,
+ struct sk_buff *skb)
+{
+ int hlen;
+
+ switch (mode) {
+ case MLX5_INLINE_MODE_TCP_UDP:
+ hlen = eth_get_headlen(skb->data, skb_headlen(skb));
+ if (hlen == ETH_HLEN && !skb_vlan_tag_present(skb))
+ hlen += VLAN_HLEN;
+ return hlen;
+ case MLX5_INLINE_MODE_IP:
+ /* A transport header offset of zero means the packet has no
+ * transport header; an offset of all 0xff's means the offset
+ * was never set.
+ */
+ if (skb_transport_offset(skb))
+ return mlx5e_skb_l3_header_offset(skb);
+ /* fall through */
+ case MLX5_INLINE_MODE_L2:
+ default:
+ return mlx5e_skb_l2_header_offset(skb);
+ }
+}
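
/*
 * Decision sketch for the inline-size selection above (standalone, with
 * integer stand-ins for the skb queries): TCP_UDP mode inlines headers
 * through L4, IP mode through L3 when a transport header exists, and
 * everything else falls back to the L2 header (plus VLAN).
 */
static int min_inline(int mode, int l4_hlen, int l3_off, int l2_off,
		      int has_transport)
{
	enum { MODE_L2, MODE_IP, MODE_TCP_UDP };

	switch (mode) {
	case MODE_TCP_UDP:
		return l4_hlen;			/* headers through L4 */
	case MODE_IP:
		if (has_transport)
			return l3_off;		/* headers through L3 */
		/* fall through */
	default:
		return l2_off;			/* Ethernet (+ VLAN) */
	}
}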
+
static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
struct sk_buff *skb, bool bf)
{
@@ -122,8 +179,6 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
* headers and occur before the data gather.
* Therefore these headers must be copied into the WQE
*/
-#define MLX5E_MIN_INLINE ETH_HLEN
-
if (bf) {
u16 ihs = skb_headlen(skb);
@@ -133,8 +188,7 @@ static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
if (ihs <= sq->max_inline)
return skb_headlen(skb);
}
-
- return MLX5E_MIN_INLINE;
+ return mlx5e_calc_min_inline(sq->min_inline_mode, skb);
}
static inline void mlx5e_tx_skb_pull_inline(unsigned char **skb_data,
@@ -191,12 +245,12 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
if (skb->encapsulation) {
eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM |
MLX5_ETH_WQE_L4_INNER_CSUM;
- sq->stats.csum_offload_inner++;
+ sq->stats.csum_partial_inner++;
} else {
eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
}
} else
- sq->stats.csum_offload_none++;
+ sq->stats.csum_none++;
if (sq->cc != sq->prev_cc) {
sq->prev_cc = sq->cc;
@@ -302,6 +356,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
sq->stats.stopped++;
}
+ sq->stats.xmit_more += skb->xmit_more;
if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
int bf_sz = 0;
@@ -309,14 +364,15 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
bf_sz = wi->num_wqebbs << 3;
cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
- mlx5e_tx_notify_hw(sq, wqe, bf_sz);
+ mlx5e_tx_notify_hw(sq, &wqe->ctrl, bf_sz);
}
/* fill sq edge with nops to avoid wqe wrap around */
while ((sq->pc & wq->sz_m1) > sq->edge)
mlx5e_send_nop(sq, false);
- sq->bf_budget = bf ? sq->bf_budget - 1 : 0;
+ if (bf)
+ sq->bf_budget--;
sq->stats.packets++;
sq->stats.bytes += num_bytes;
@@ -350,6 +406,9 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
sq = container_of(cq, struct mlx5e_sq, cq);
+ if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state)))
+ return false;
+
npkts = 0;
nbytes = 0;
@@ -387,7 +446,6 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
wi = &sq->wqe_info[ci];
if (unlikely(!skb)) { /* nop */
- sq->stats.nop++;
sqcc++;
continue;
}
@@ -426,11 +484,39 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
netdev_tx_completed_queue(sq->txq, npkts, nbytes);
if (netif_tx_queue_stopped(sq->txq) &&
- mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM) &&
- likely(test_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state))) {
- netif_tx_wake_queue(sq->txq);
- sq->stats.wake++;
+ mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM)) {
+ netif_tx_wake_queue(sq->txq);
+ sq->stats.wake++;
}
return (i == MLX5E_TX_CQ_POLL_BUDGET);
}
+
+void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
+{
+ struct mlx5e_tx_wqe_info *wi;
+ struct sk_buff *skb;
+ u16 ci;
+ int i;
+
+ while (sq->cc != sq->pc) {
+ ci = sq->cc & sq->wq.sz_m1;
+ skb = sq->skb[ci];
+ wi = &sq->wqe_info[ci];
+
+ if (!skb) { /* nop */
+ sq->cc++;
+ continue;
+ }
+
+ for (i = 0; i < wi->num_dma; i++) {
+ struct mlx5e_sq_dma *dma =
+ mlx5e_dma_get(sq, sq->dma_fifo_cc++);
+
+ mlx5e_tx_dma_unmap(sq->pdev, dma);
+ }
+
+ dev_kfree_skb_any(skb);
+ sq->cc += wi->num_wqebbs;
+ }
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 9bb4395aceeb..9bf33bb69210 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -49,6 +49,62 @@ struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq)
return cqe;
}
+static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
+{
+ struct mlx5e_sq *sq = container_of(cq, struct mlx5e_sq, cq);
+ struct mlx5_wq_cyc *wq;
+ struct mlx5_cqe64 *cqe;
+ u16 sqcc;
+
+ if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state)))
+ return;
+
+ cqe = mlx5e_get_cqe(cq);
+ if (likely(!cqe))
+ return;
+
+ wq = &sq->wq;
+
+ /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+ * otherwise a cq overrun may occur
+ */
+ sqcc = sq->cc;
+
+ do {
+ u16 ci = be16_to_cpu(cqe->wqe_counter) & wq->sz_m1;
+ struct mlx5e_ico_wqe_info *icowi = &sq->ico_wqe_info[ci];
+
+ mlx5_cqwq_pop(&cq->wq);
+ sqcc += icowi->num_wqebbs;
+
+ if (unlikely((cqe->op_own >> 4) != MLX5_CQE_REQ)) {
+ WARN_ONCE(true, "mlx5e: Bad OP in ICOSQ CQE: 0x%x\n",
+ cqe->op_own);
+ break;
+ }
+
+ switch (icowi->opcode) {
+ case MLX5_OPCODE_NOP:
+ break;
+ case MLX5_OPCODE_UMR:
+ mlx5e_post_rx_fragmented_mpwqe(&sq->channel->rq);
+ break;
+ default:
+ WARN_ONCE(true,
+ "mlx5e: Bad OPCODE in ICOSQ WQE info: 0x%x\n",
+ icowi->opcode);
+ }
+
+ } while ((cqe = mlx5e_get_cqe(cq)));
+
+ mlx5_cqwq_update_db_record(&cq->wq);
+
+ /* ensure cq space is freed before enabling more cqes */
+ wmb();
+
+ sq->cc = sqcc;
+}
+
int mlx5e_napi_poll(struct napi_struct *napi, int budget)
{
struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
@@ -64,6 +120,9 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget);
busy |= work_done == budget;
+
+ mlx5e_poll_ico_cq(&c->icosq.cq);
+
busy |= mlx5e_post_rx_wqes(&c->rq);
if (busy)
@@ -79,7 +138,12 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
for (i = 0; i < c->num_tc; i++)
mlx5e_cq_arm(&c->sq[i].cq);
+
+ if (test_bit(MLX5E_RQ_STATE_AM, &c->rq.state))
+ mlx5e_rx_am(&c->rq);
+
mlx5e_cq_arm(&c->rq.cq);
+ mlx5e_cq_arm(&c->icosq.cq);
return work_done;
}
@@ -88,8 +152,8 @@ void mlx5e_completion_event(struct mlx5_core_cq *mcq)
{
struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
+ cq->event_ctr++;
set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags);
- barrier();
napi_schedule(cq->napi);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 18fccec72c5d..0e30602ef76d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -202,7 +202,7 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
struct mlx5_eqe *eqe;
int eqes_found = 0;
int set_ci = 0;
- u32 cqn;
+ u32 cqn = -1;
u32 rsn;
u8 port;
@@ -320,6 +320,9 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
eq_update_ci(eq, 1);
+ if (cqn != -1)
+ tasklet_schedule(&eq->tasklet_ctx.task);
+
return eqes_found;
}
@@ -403,6 +406,12 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
if (err)
goto err_irq;
+ INIT_LIST_HEAD(&eq->tasklet_ctx.list);
+ INIT_LIST_HEAD(&eq->tasklet_ctx.process_list);
+ spin_lock_init(&eq->tasklet_ctx.lock);
+ tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb,
+ (unsigned long)&eq->tasklet_ctx);
+
/* EQs are created in ARMED state
*/
eq_update_ci(eq, 1);
@@ -436,6 +445,7 @@ int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
eq->eqn);
synchronize_irq(eq->irqn);
+ tasklet_disable(&eq->tasklet_ctx.task);
mlx5_buf_free(dev, &eq->buf);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index bc3d9f8a75c1..b247949df135 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -40,17 +40,6 @@
#define UPLINK_VPORT 0xFFFF
-#define MLX5_DEBUG_ESWITCH_MASK BIT(3)
-
-#define esw_info(dev, format, ...) \
- pr_info("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)
-
-#define esw_warn(dev, format, ...) \
- pr_warn("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)
-
-#define esw_debug(dev, format, ...) \
- mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)
-
enum {
MLX5_ACTION_NONE = 0,
MLX5_ACTION_ADD = 1,
@@ -77,16 +66,23 @@ struct vport_addr {
u8 action;
u32 vport;
struct mlx5_flow_rule *flow_rule; /* SRIOV only */
+ /* A flag indicating that mac was added due to mc promiscuous vport */
+ bool mc_promisc;
};
enum {
UC_ADDR_CHANGE = BIT(0),
MC_ADDR_CHANGE = BIT(1),
+ PROMISC_CHANGE = BIT(3),
};
/* Vport context events */
#define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \
- MC_ADDR_CHANGE)
+ MC_ADDR_CHANGE | \
+ PROMISC_CHANGE)
+
+int esw_offloads_init(struct mlx5_eswitch *esw, int nvports);
+void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports);
static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
u32 events_mask)
@@ -116,6 +112,9 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
if (events_mask & MC_ADDR_CHANGE)
MLX5_SET(nic_vport_context, nic_vport_ctx,
event_on_mc_address_change, 1);
+ if (events_mask & PROMISC_CHANGE)
+ MLX5_SET(nic_vport_context, nic_vport_ctx,
+ event_on_promisc_change, 1);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (err)
@@ -323,30 +322,45 @@ static void del_l2_table_entry(struct mlx5_core_dev *dev, u32 index)
/* E-Switch FDB */
static struct mlx5_flow_rule *
-esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
+__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
+ u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
{
- int match_header = MLX5_MATCH_OUTER_HEADERS;
- struct mlx5_flow_destination dest;
+ int match_header = (is_zero_ether_addr(mac_c) ? 0 :
+ MLX5_MATCH_OUTER_HEADERS);
struct mlx5_flow_rule *flow_rule = NULL;
- u32 *match_v;
- u32 *match_c;
- u8 *dmac_v;
- u8 *dmac_c;
-
- match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
- match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
- if (!match_v || !match_c) {
+ struct mlx5_flow_destination dest;
+ struct mlx5_flow_spec *spec;
+ void *mv_misc = NULL;
+ void *mc_misc = NULL;
+ u8 *dmac_v = NULL;
+ u8 *dmac_c = NULL;
+
+ if (rx_rule)
+ match_header |= MLX5_MATCH_MISC_PARAMETERS;
+
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec) {
pr_warn("FDB: Failed to alloc match parameters\n");
- goto out;
+ return NULL;
}
- dmac_v = MLX5_ADDR_OF(fte_match_param, match_v,
+ dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers.dmac_47_16);
- dmac_c = MLX5_ADDR_OF(fte_match_param, match_c,
+ dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
outer_headers.dmac_47_16);
- ether_addr_copy(dmac_v, mac);
- /* Match criteria mask */
- memset(dmac_c, 0xff, 6);
+ if (match_header & MLX5_MATCH_OUTER_HEADERS) {
+ ether_addr_copy(dmac_v, mac_v);
+ ether_addr_copy(dmac_c, mac_c);
+ }
+
+ if (match_header & MLX5_MATCH_MISC_PARAMETERS) {
+ mv_misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters);
+ mc_misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters);
+ MLX5_SET(fte_match_set_misc, mv_misc, source_port, UPLINK_VPORT);
+ MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
+ }
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest.vport_num = vport;
@@ -354,26 +368,56 @@ esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
esw_debug(esw->dev,
"\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
dmac_v, dmac_c, vport);
+ spec->match_criteria_enable = match_header;
flow_rule =
- mlx5_add_flow_rule(esw->fdb_table.fdb,
- match_header,
- match_c,
- match_v,
+ mlx5_add_flow_rule(esw->fdb_table.fdb, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
0, &dest);
- if (IS_ERR_OR_NULL(flow_rule)) {
+ if (IS_ERR(flow_rule)) {
pr_warn(
"FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
flow_rule = NULL;
}
-out:
- kfree(match_v);
- kfree(match_c);
+
+ kvfree(spec);
return flow_rule;
}
-static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
+static struct mlx5_flow_rule *
+esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
+{
+ u8 mac_c[ETH_ALEN];
+
+ eth_broadcast_addr(mac_c);
+ return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac);
+}
+
+static struct mlx5_flow_rule *
+esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport)
+{
+ u8 mac_c[ETH_ALEN];
+ u8 mac_v[ETH_ALEN];
+
+ eth_zero_addr(mac_c);
+ eth_zero_addr(mac_v);
+ mac_c[0] = 0x01;
+ mac_v[0] = 0x01;
+ return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v);
+}
+
+static struct mlx5_flow_rule *
+esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport)
+{
+ u8 mac_c[ETH_ALEN];
+ u8 mac_v[ETH_ALEN];
+
+ eth_zero_addr(mac_c);
+ eth_zero_addr(mac_v);
+ return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
+}
+
+static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_core_dev *dev = esw->dev;
@@ -401,47 +445,97 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
memset(flow_group_in, 0, inlen);
table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
- fdb = mlx5_create_flow_table(root_ns, 0, table_size);
- if (IS_ERR_OR_NULL(fdb)) {
+ fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0);
+ if (IS_ERR(fdb)) {
err = PTR_ERR(fdb);
esw_warn(dev, "Failed to create FDB Table err %d\n", err);
goto out;
}
+ esw->fdb_table.fdb = fdb;
+ /* Addresses group: full match on unicast/multicast addresses */
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
MLX5_MATCH_OUTER_HEADERS);
match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
+ /* Preserve 2 entries for allmulti and promisc rules */
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
eth_broadcast_addr(dmac);
-
g = mlx5_create_flow_group(fdb, flow_group_in);
- if (IS_ERR_OR_NULL(g)) {
+ if (IS_ERR(g)) {
err = PTR_ERR(g);
esw_warn(dev, "Failed to create flow group err(%d)\n", err);
goto out;
}
+ esw->fdb_table.legacy.addr_grp = g;
+
+ /* Allmulti group: one rule that forwards any mcast traffic */
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2);
+ eth_zero_addr(dmac);
+ dmac[0] = 0x01;
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
+ goto out;
+ }
+ esw->fdb_table.legacy.allmulti_grp = g;
+
+ /* Promiscuous group:
+ * One rule that forwards all unmatched traffic from the previous groups
+ */
+ eth_zero_addr(dmac);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
+ goto out;
+ }
+ esw->fdb_table.legacy.promisc_grp = g;
- esw->fdb_table.addr_grp = g;
- esw->fdb_table.fdb = fdb;
out:
- kfree(flow_group_in);
- if (err && !IS_ERR_OR_NULL(fdb))
- mlx5_destroy_flow_table(fdb);
+ if (err) {
+ if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.allmulti_grp)) {
+ mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
+ esw->fdb_table.legacy.allmulti_grp = NULL;
+ }
+ if (!IS_ERR_OR_NULL(esw->fdb_table.legacy.addr_grp)) {
+ mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
+ esw->fdb_table.legacy.addr_grp = NULL;
+ }
+ if (!IS_ERR_OR_NULL(esw->fdb_table.fdb)) {
+ mlx5_destroy_flow_table(esw->fdb_table.fdb);
+ esw->fdb_table.fdb = NULL;
+ }
+ }
+
+ kvfree(flow_group_in);
return err;
}
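
With the three groups created, the legacy FDB is a single table partitioned by index range; lookups fall through in index order, so exact DMAC entries beat the multicast catch-all, which beats the promiscuous star rule. A sketch of the layout (table_size as computed above):

    /* Legacy FDB flow-group layout:
     *
     *   index 0 .. table_size-3 : addr_grp     - full DMAC match (UC/MC entries)
     *   index table_size-2      : allmulti_grp - DMAC multicast bit only
     *   index table_size-1      : promisc_grp  - no L2 match, source_port only
     */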
-static void esw_destroy_fdb_table(struct mlx5_eswitch *esw)
+static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
{
if (!esw->fdb_table.fdb)
return;
esw_debug(esw->dev, "Destroy FDB Table\n");
- mlx5_destroy_flow_group(esw->fdb_table.addr_grp);
+ mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
+ mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
+ mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
mlx5_destroy_flow_table(esw->fdb_table.fdb);
esw->fdb_table.fdb = NULL;
- esw->fdb_table.addr_grp = NULL;
+ esw->fdb_table.legacy.addr_grp = NULL;
+ esw->fdb_table.legacy.allmulti_grp = NULL;
+ esw->fdb_table.legacy.promisc_grp = NULL;
}
/* E-Switch vport UC/MC lists management */
@@ -473,7 +567,8 @@ static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
if (err)
goto abort;
- if (esw->fdb_table.fdb) /* SRIOV is enabled: Forward UC MAC to vport */
+ /* SRIOV is enabled: Forward UC MAC to vport */
+ if (esw->fdb_table.fdb && esw->mode == SRIOV_LEGACY)
vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM index:%d fr(%p)\n",
@@ -511,6 +606,53 @@ static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
return 0;
}
+static void update_allmulti_vports(struct mlx5_eswitch *esw,
+ struct vport_addr *vaddr,
+ struct esw_mc_addr *esw_mc)
+{
+ u8 *mac = vaddr->node.addr;
+ u32 vport_idx = 0;
+
+ for (vport_idx = 0; vport_idx < esw->total_vports; vport_idx++) {
+ struct mlx5_vport *vport = &esw->vports[vport_idx];
+ struct hlist_head *vport_hash = vport->mc_list;
+ struct vport_addr *iter_vaddr =
+ l2addr_hash_find(vport_hash,
+ mac,
+ struct vport_addr);
+ if (IS_ERR_OR_NULL(vport->allmulti_rule) ||
+ vaddr->vport == vport_idx)
+ continue;
+ switch (vaddr->action) {
+ case MLX5_ACTION_ADD:
+ if (iter_vaddr)
+ continue;
+ iter_vaddr = l2addr_hash_add(vport_hash, mac,
+ struct vport_addr,
+ GFP_KERNEL);
+ if (!iter_vaddr) {
+ esw_warn(esw->dev,
+ "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n",
+ mac, vport_idx);
+ continue;
+ }
+ iter_vaddr->vport = vport_idx;
+ iter_vaddr->flow_rule =
+ esw_fdb_set_vport_rule(esw,
+ mac,
+ vport_idx);
+ iter_vaddr->mc_promisc = true;
+ break;
+ case MLX5_ACTION_DEL:
+ if (!iter_vaddr)
+ continue;
+ mlx5_del_flow_rule(iter_vaddr->flow_rule);
+ l2addr_hash_del(iter_vaddr);
+ break;
+ }
+ }
+}
+
static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
struct hlist_head *hash = esw->mc_table;
@@ -531,8 +673,17 @@ static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
esw_fdb_set_vport_rule(esw, mac, UPLINK_VPORT);
+
+ /* Add this multicast mac to all the mc promiscuous vports */
+ update_allmulti_vports(esw, vaddr, esw_mc);
+
add:
- esw_mc->refcnt++;
+ /* If the multicast mac was added as a result of an mc promiscuous
+ * vport, don't increment the multicast ref count.
+ */
+ if (!vaddr->mc_promisc)
+ esw_mc->refcnt++;
+
/* Forward MC MAC to vport */
vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
esw_debug(esw->dev,
@@ -568,9 +719,15 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
mlx5_del_flow_rule(vaddr->flow_rule);
vaddr->flow_rule = NULL;
- if (--esw_mc->refcnt)
+ /* If the multicast mac was added as a result of an mc promiscuous
+ * vport, don't decrement the multicast ref count.
+ */
+ if (vaddr->mc_promisc || (--esw_mc->refcnt > 0))
return 0;
+ /* Remove this multicast mac from all the mc promiscuous vports */
+ update_allmulti_vports(esw, vaddr, esw_mc);
+
if (esw_mc->uplink_rule)
mlx5_del_flow_rule(esw_mc->uplink_rule);
@@ -643,10 +800,13 @@ static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
addr->action = MLX5_ACTION_DEL;
}
+ if (!vport->enabled)
+ goto out;
+
err = mlx5_query_nic_vport_mac_list(esw->dev, vport_num, list_type,
mac_list, &size);
if (err)
- return;
+ goto out;
esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
vport_num, is_uc ? "UC" : "MC", size);
@@ -660,6 +820,24 @@ static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr);
if (addr) {
addr->action = MLX5_ACTION_NONE;
+ /* If this mac was previously added because of allmulti
+ * promiscuous rx mode, it is now converted back to the
+ * original vport mac.
+ */
+ if (addr->mc_promisc) {
+ struct esw_mc_addr *esw_mc =
+ l2addr_hash_find(esw->mc_table,
+ mac_list[i],
+ struct esw_mc_addr);
+ if (!esw_mc) {
+ esw_warn(esw->dev,
+ "Failed to MAC(%pM) in mcast DB\n",
+ mac_list[i]);
+ continue;
+ }
+ esw_mc->refcnt++;
+ addr->mc_promisc = false;
+ }
continue;
}
@@ -674,13 +852,121 @@ static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
addr->vport = vport_num;
addr->action = MLX5_ACTION_ADD;
}
+out:
kfree(mac_list);
}
-static void esw_vport_change_handler(struct work_struct *work)
+/* Sync the vport's MC list with the e-switch global multicast table.
+ * Must be called after esw_update_vport_addr_list.
+ */
+static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw, u32 vport_num)
+{
+ struct mlx5_vport *vport = &esw->vports[vport_num];
+ struct l2addr_node *node;
+ struct vport_addr *addr;
+ struct hlist_head *hash;
+ struct hlist_node *tmp;
+ int hi;
+
+ hash = vport->mc_list;
+
+ for_each_l2hash_node(node, tmp, esw->mc_table, hi) {
+ u8 *mac = node->addr;
+
+ addr = l2addr_hash_find(hash, mac, struct vport_addr);
+ if (addr) {
+ if (addr->action == MLX5_ACTION_DEL)
+ addr->action = MLX5_ACTION_NONE;
+ continue;
+ }
+ addr = l2addr_hash_add(hash, mac, struct vport_addr,
+ GFP_KERNEL);
+ if (!addr) {
+ esw_warn(esw->dev,
+ "Failed to add allmulti MAC(%pM) to vport[%d] DB\n",
+ mac, vport_num);
+ continue;
+ }
+ addr->vport = vport_num;
+ addr->action = MLX5_ACTION_ADD;
+ addr->mc_promisc = true;
+ }
+}
+
+/* Apply vport rx mode to HW FDB table */
+static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num,
+ bool promisc, bool mc_promisc)
+{
+ struct esw_mc_addr *allmulti_addr = esw->mc_promisc;
+ struct mlx5_vport *vport = &esw->vports[vport_num];
+
+ if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
+ goto promisc;
+
+ if (mc_promisc) {
+ vport->allmulti_rule =
+ esw_fdb_set_vport_allmulti_rule(esw, vport_num);
+ if (!allmulti_addr->uplink_rule)
+ allmulti_addr->uplink_rule =
+ esw_fdb_set_vport_allmulti_rule(esw,
+ UPLINK_VPORT);
+ allmulti_addr->refcnt++;
+ } else if (vport->allmulti_rule) {
+ mlx5_del_flow_rule(vport->allmulti_rule);
+ vport->allmulti_rule = NULL;
+
+ if (--allmulti_addr->refcnt > 0)
+ goto promisc;
+
+ if (allmulti_addr->uplink_rule)
+ mlx5_del_flow_rule(allmulti_addr->uplink_rule);
+ allmulti_addr->uplink_rule = NULL;
+ }
+
+promisc:
+ if (IS_ERR_OR_NULL(vport->promisc_rule) != promisc)
+ return;
+
+ if (promisc) {
+ vport->promisc_rule = esw_fdb_set_vport_promisc_rule(esw,
+ vport_num);
+ } else if (vport->promisc_rule) {
+ mlx5_del_flow_rule(vport->promisc_rule);
+ vport->promisc_rule = NULL;
+ }
+}
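
The guards `IS_ERR_OR_NULL(rule) != mc_promisc` and `IS_ERR_OR_NULL(rule) != promisc` are compact change-detection tests: the rule pointer doubles as the current state (NULL means not installed), so each branch runs only when the requested mode differs from what is already programmed. The idiom in isolation, as a sketch with stand-in helpers:

    #include <stdbool.h>
    #include <stddef.h>

    static void *install_rule(void) { return (void *)1; } /* stand-in */
    static void remove_rule(void *rule) { (void)rule; }   /* stand-in */

    struct port { void *allmulti_rule; }; /* NULL == not installed */

    /* Act only when the requested state differs from the state already
     * encoded by the rule pointer -- the same test the driver spells as
     * IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc.
     */
    static void apply_allmulti(struct port *p, bool want)
    {
    	bool installed = (p->allmulti_rule != NULL);

    	if (installed == want)
    		return; /* nothing to change */
    	if (want) {
    		p->allmulti_rule = install_rule();
    	} else {
    		remove_rule(p->allmulti_rule);
    		p->allmulti_rule = NULL;
    	}
    }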
+
+/* Sync vport rx mode from vport context */
+static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num)
+{
+ struct mlx5_vport *vport = &esw->vports[vport_num];
+ int promisc_all = 0;
+ int promisc_uc = 0;
+ int promisc_mc = 0;
+ int err;
+
+ err = mlx5_query_nic_vport_promisc(esw->dev,
+ vport_num,
+ &promisc_uc,
+ &promisc_mc,
+ &promisc_all);
+ if (err)
+ return;
+ esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
+ vport_num, promisc_all, promisc_mc);
+
+ if (!vport->trusted || !vport->enabled) {
+ promisc_uc = 0;
+ promisc_mc = 0;
+ promisc_all = 0;
+ }
+
+ esw_apply_vport_rx_mode(esw, vport_num, promisc_all,
+ (promisc_all || promisc_mc));
+}
+
+static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
{
- struct mlx5_vport *vport =
- container_of(work, struct mlx5_vport, vport_change_handler);
struct mlx5_core_dev *dev = vport->dev;
struct mlx5_eswitch *esw = dev->priv.eswitch;
u8 mac[ETH_ALEN];
@@ -699,6 +985,15 @@ static void esw_vport_change_handler(struct work_struct *work)
if (vport->enabled_events & MC_ADDR_CHANGE) {
esw_update_vport_addr_list(esw, vport->vport,
MLX5_NVPRT_LIST_TYPE_MC);
+ }
+
+ if (vport->enabled_events & PROMISC_CHANGE) {
+ esw_update_vport_rx_mode(esw, vport->vport);
+ if (!IS_ERR_OR_NULL(vport->allmulti_rule))
+ esw_update_vport_mc_promisc(esw, vport->vport);
+ }
+
+ if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE)) {
esw_apply_vport_addr_list(esw, vport->vport,
MLX5_NVPRT_LIST_TYPE_MC);
}
@@ -709,15 +1004,459 @@ static void esw_vport_change_handler(struct work_struct *work)
vport->enabled_events);
}
+static void esw_vport_change_handler(struct work_struct *work)
+{
+ struct mlx5_vport *vport =
+ container_of(work, struct mlx5_vport, vport_change_handler);
+ struct mlx5_eswitch *esw = vport->dev->priv.eswitch;
+
+ mutex_lock(&esw->state_lock);
+ esw_vport_change_handle_locked(vport);
+ mutex_unlock(&esw->state_lock);
+}
+
+static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *vlan_grp = NULL;
+ struct mlx5_flow_group *drop_grp = NULL;
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_namespace *root_ns;
+ struct mlx5_flow_table *acl;
+ void *match_criteria;
+ u32 *flow_group_in;
+ /* The egress acl table contains 2 rules:
+ * 1) Allow traffic with vlan_tag=vst_vlan_id
+ * 2) Drop all other traffic.
+ */
+ int table_size = 2;
+ int err = 0;
+
+ if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support) ||
+ !IS_ERR_OR_NULL(vport->egress.acl))
+ return;
+
+ esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
+ vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));
+
+ root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
+ if (!root_ns) {
+ esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
+ return;
+ }
+
+ flow_group_in = mlx5_vzalloc(inlen);
+ if (!flow_group_in)
+ return;
+
+ acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
+ if (IS_ERR(acl)) {
+ err = PTR_ERR(acl);
+ esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
+
+ vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
+ if (IS_ERR(vlan_grp)) {
+ err = PTR_ERR(vlan_grp);
+ esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
+ drop_grp = mlx5_create_flow_group(acl, flow_group_in);
+ if (IS_ERR(drop_grp)) {
+ err = PTR_ERR(drop_grp);
+ esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+
+ vport->egress.acl = acl;
+ vport->egress.drop_grp = drop_grp;
+ vport->egress.allowed_vlans_grp = vlan_grp;
+out:
+ kvfree(flow_group_in);
+ if (err && !IS_ERR_OR_NULL(vlan_grp))
+ mlx5_destroy_flow_group(vlan_grp);
+ if (err && !IS_ERR_OR_NULL(acl))
+ mlx5_destroy_flow_table(acl);
+}
+
+static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan))
+ mlx5_del_flow_rule(vport->egress.allowed_vlan);
+
+ if (!IS_ERR_OR_NULL(vport->egress.drop_rule))
+ mlx5_del_flow_rule(vport->egress.drop_rule);
+
+ vport->egress.allowed_vlan = NULL;
+ vport->egress.drop_rule = NULL;
+}
+
+static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ if (IS_ERR_OR_NULL(vport->egress.acl))
+ return;
+
+ esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport);
+
+ esw_vport_cleanup_egress_rules(esw, vport);
+ mlx5_destroy_flow_group(vport->egress.allowed_vlans_grp);
+ mlx5_destroy_flow_group(vport->egress.drop_grp);
+ mlx5_destroy_flow_table(vport->egress.acl);
+ vport->egress.allowed_vlans_grp = NULL;
+ vport->egress.drop_grp = NULL;
+ vport->egress.acl = NULL;
+}
+
+static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_namespace *root_ns;
+ struct mlx5_flow_table *acl;
+ struct mlx5_flow_group *g;
+ void *match_criteria;
+ u32 *flow_group_in;
+ /* The ingress acl table contains 4 groups
+ * (2 rules are active at the same time:
+ * 1 allow rule from one of the first 3 groups and
+ * 1 drop rule from the last group):
+ * 1) Allow untagged traffic with smac=original mac.
+ * 2) Allow untagged traffic.
+ * 3) Allow traffic with smac=original mac.
+ * 4) Drop all other traffic.
+ */
+ int table_size = 4;
+ int err = 0;
+
+ if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support) ||
+ !IS_ERR_OR_NULL(vport->ingress.acl))
+ return;
+
+ esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
+ vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
+
+ root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
+ if (!root_ns) {
+ esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
+ return;
+ }
+
+ flow_group_in = mlx5_vzalloc(inlen);
+ if (!flow_group_in)
+ return;
+
+ acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
+ if (IS_ERR(acl)) {
+ err = PTR_ERR(acl);
+ esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+ vport->ingress.acl = acl;
+
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
+
+ g = mlx5_create_flow_group(acl, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+ vport->ingress.allow_untagged_spoofchk_grp = g;
+
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
+
+ g = mlx5_create_flow_group(acl, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+ vport->ingress.allow_untagged_only_grp = g;
+
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
+
+ g = mlx5_create_flow_group(acl, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+ vport->ingress.allow_spoofchk_only_grp = g;
+
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
+
+ g = mlx5_create_flow_group(acl, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+ vport->ingress.drop_grp = g;
+
+out:
+ if (err) {
+ if (!IS_ERR_OR_NULL(vport->ingress.allow_spoofchk_only_grp))
+ mlx5_destroy_flow_group(
+ vport->ingress.allow_spoofchk_only_grp);
+ if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_only_grp))
+ mlx5_destroy_flow_group(
+ vport->ingress.allow_untagged_only_grp);
+ if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_spoofchk_grp))
+ mlx5_destroy_flow_group(
+ vport->ingress.allow_untagged_spoofchk_grp);
+ if (!IS_ERR_OR_NULL(vport->ingress.acl))
+ mlx5_destroy_flow_table(vport->ingress.acl);
+ }
+
+ kvfree(flow_group_in);
+}
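
Taken together, the four groups decode the (vlan/qos, spoofchk) configuration: esw_vport_ingress_config below installs exactly one allow rule in whichever of the first three groups fits the current settings, backed by the catch-all drop. A sketch of the placement:

    /* Ingress ACL rule placement by configuration (sketch):
     *
     *   vlan/qos  spoofchk   allow rule group                 match
     *   --------  --------   ------------------------------   -------------------
     *   yes       yes        allow_untagged_spoofchk_grp (0)  untagged && smac
     *   yes       no         allow_untagged_only_grp     (1)  untagged
     *   no        yes        allow_spoofchk_only_grp     (2)  smac
     *   (any of the above)   drop_grp                    (3)  catch-all drop
     */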
+
+static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ if (!IS_ERR_OR_NULL(vport->ingress.drop_rule))
+ mlx5_del_flow_rule(vport->ingress.drop_rule);
+
+ if (!IS_ERR_OR_NULL(vport->ingress.allow_rule))
+ mlx5_del_flow_rule(vport->ingress.allow_rule);
+
+ vport->ingress.drop_rule = NULL;
+ vport->ingress.allow_rule = NULL;
+}
+
+static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ if (IS_ERR_OR_NULL(vport->ingress.acl))
+ return;
+
+ esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);
+
+ esw_vport_cleanup_ingress_rules(esw, vport);
+ mlx5_destroy_flow_group(vport->ingress.allow_spoofchk_only_grp);
+ mlx5_destroy_flow_group(vport->ingress.allow_untagged_only_grp);
+ mlx5_destroy_flow_group(vport->ingress.allow_untagged_spoofchk_grp);
+ mlx5_destroy_flow_group(vport->ingress.drop_grp);
+ mlx5_destroy_flow_table(vport->ingress.acl);
+ vport->ingress.acl = NULL;
+ vport->ingress.drop_grp = NULL;
+ vport->ingress.allow_spoofchk_only_grp = NULL;
+ vport->ingress.allow_untagged_only_grp = NULL;
+ vport->ingress.allow_untagged_spoofchk_grp = NULL;
+}
+
+static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ struct mlx5_flow_spec *spec;
+ u8 smac[ETH_ALEN];
+ int err = 0;
+ u8 *smac_v;
+
+ if (vport->spoofchk) {
+ err = mlx5_query_nic_vport_mac_address(esw->dev, vport->vport, smac);
+ if (err) {
+ esw_warn(esw->dev,
+ "vport[%d] configure ingress rules failed, query smac failed, err(%d)\n",
+ vport->vport, err);
+ return err;
+ }
+
+ if (!is_valid_ether_addr(smac)) {
+ mlx5_core_warn(esw->dev,
+ "vport[%d] configure ingress rules failed, illegal mac with spoofchk\n",
+ vport->vport);
+ return -EPERM;
+ }
+ }
+
+ esw_vport_cleanup_ingress_rules(esw, vport);
+
+ if (!vport->vlan && !vport->qos && !vport->spoofchk) {
+ esw_vport_disable_ingress_acl(esw, vport);
+ return 0;
+ }
+
+ esw_vport_enable_ingress_acl(esw, vport);
+
+ esw_debug(esw->dev,
+ "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
+ vport->vport, vport->vlan, vport->qos);
+
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec) {
+ err = -ENOMEM;
+ esw_warn(esw->dev, "vport[%d] configure ingress rules failed, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+
+ if (vport->vlan || vport->qos)
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
+
+ if (vport->spoofchk) {
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_15_0);
+ smac_v = MLX5_ADDR_OF(fte_match_param,
+ spec->match_value,
+ outer_headers.smac_47_16);
+ ether_addr_copy(smac_v, smac);
+ }
+
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ vport->ingress.allow_rule =
+ mlx5_add_flow_rule(vport->ingress.acl, spec,
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW,
+ 0, NULL);
+ if (IS_ERR(vport->ingress.allow_rule)) {
+ err = PTR_ERR(vport->ingress.allow_rule);
+ pr_warn("vport[%d] configure ingress allow rule, err(%d)\n",
+ vport->vport, err);
+ vport->ingress.allow_rule = NULL;
+ goto out;
+ }
+
+ memset(spec, 0, sizeof(*spec));
+ vport->ingress.drop_rule =
+ mlx5_add_flow_rule(vport->ingress.acl, spec,
+ MLX5_FLOW_CONTEXT_ACTION_DROP,
+ 0, NULL);
+ if (IS_ERR(vport->ingress.drop_rule)) {
+ err = PTR_ERR(vport->ingress.drop_rule);
+ pr_warn("vport[%d] configure ingress drop rule, err(%d)\n",
+ vport->vport, err);
+ vport->ingress.drop_rule = NULL;
+ goto out;
+ }
+
+out:
+ if (err)
+ esw_vport_cleanup_ingress_rules(esw, vport);
+ kvfree(spec);
+ return err;
+}
+
+static int esw_vport_egress_config(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
+{
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ esw_vport_cleanup_egress_rules(esw, vport);
+
+ if (!vport->vlan && !vport->qos) {
+ esw_vport_disable_egress_acl(esw, vport);
+ return 0;
+ }
+
+ esw_vport_enable_egress_acl(esw, vport);
+
+ esw_debug(esw->dev,
+ "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
+ vport->vport, vport->vlan, vport->qos);
+
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec) {
+ err = -ENOMEM;
+ esw_warn(esw->dev, "vport[%d] configure egress rules failed, err(%d)\n",
+ vport->vport, err);
+ goto out;
+ }
+
+ /* Allowed vlan rule */
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->vlan);
+
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ vport->egress.allowed_vlan =
+ mlx5_add_flow_rule(vport->egress.acl, spec,
+ MLX5_FLOW_CONTEXT_ACTION_ALLOW,
+ 0, NULL);
+ if (IS_ERR(vport->egress.allowed_vlan)) {
+ err = PTR_ERR(vport->egress.allowed_vlan);
+ pr_warn("vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
+ vport->vport, err);
+ vport->egress.allowed_vlan = NULL;
+ goto out;
+ }
+
+ /* Star rule: drop all other traffic */
+ memset(spec, 0, sizeof(*spec));
+ vport->egress.drop_rule =
+ mlx5_add_flow_rule(vport->egress.acl, spec,
+ MLX5_FLOW_CONTEXT_ACTION_DROP,
+ 0, NULL);
+ if (IS_ERR(vport->egress.drop_rule)) {
+ err = PTR_ERR(vport->egress.drop_rule);
+ pr_warn("vport[%d] configure egress drop rule failed, err(%d)\n",
+ vport->vport, err);
+ vport->egress.drop_rule = NULL;
+ }
+out:
+ kvfree(spec);
+ return err;
+}
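
Between them, the ingress and egress configs implement VST: the ingress ACL (packets from the VF) only passes untagged frames, the e-switch then inserts the cvlan programmed via modify_esw_vport_cvlan, and the egress ACL (packets to the VF) only passes frames carrying exactly that VLAN. A sketch of the installed rules for a hypothetical vlan=100, as typically driven from the host with "ip link set <pf> vf 0 vlan 100":

    /* VST rule set for a vport configured with vlan=100 (sketch):
     *
     * ingress ACL (VF -> e-switch):
     *   allow : vlan_tag == 0                     - VF must send untagged
     *   drop  : everything else
     *
     * egress ACL (e-switch -> VF):
     *   allow : vlan_tag == 1 && first_vid == 100
     *   drop  : everything else
     */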
+
static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
int enable_events)
{
struct mlx5_vport *vport = &esw->vports[vport_num];
- unsigned long flags;
+ mutex_lock(&esw->state_lock);
WARN_ON(vport->enabled);
esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
+
+ /* Only VFs need ACLs for VST and spoofchk filtering */
+ if (vport_num && esw->mode == SRIOV_LEGACY) {
+ esw_vport_ingress_config(esw, vport);
+ esw_vport_egress_config(esw, vport);
+ }
+
mlx5_modify_vport_admin_state(esw->dev,
MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
vport_num,
@@ -725,53 +1464,29 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
/* Sync with current vport context */
vport->enabled_events = enable_events;
- esw_vport_change_handler(&vport->vport_change_handler);
-
- spin_lock_irqsave(&vport->lock, flags);
vport->enabled = true;
- spin_unlock_irqrestore(&vport->lock, flags);
- arm_vport_context_events_cmd(esw->dev, vport_num, enable_events);
+ /* only PF is trusted by default */
+ vport->trusted = (vport_num) ? false : true;
+ esw_vport_change_handle_locked(vport);
esw->enabled_vports++;
esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
-}
-
-static void esw_cleanup_vport(struct mlx5_eswitch *esw, u16 vport_num)
-{
- struct mlx5_vport *vport = &esw->vports[vport_num];
- struct l2addr_node *node;
- struct vport_addr *addr;
- struct hlist_node *tmp;
- int hi;
-
- for_each_l2hash_node(node, tmp, vport->uc_list, hi) {
- addr = container_of(node, struct vport_addr, node);
- addr->action = MLX5_ACTION_DEL;
- }
- esw_apply_vport_addr_list(esw, vport_num, MLX5_NVPRT_LIST_TYPE_UC);
-
- for_each_l2hash_node(node, tmp, vport->mc_list, hi) {
- addr = container_of(node, struct vport_addr, node);
- addr->action = MLX5_ACTION_DEL;
- }
- esw_apply_vport_addr_list(esw, vport_num, MLX5_NVPRT_LIST_TYPE_MC);
+ mutex_unlock(&esw->state_lock);
}
static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
{
struct mlx5_vport *vport = &esw->vports[vport_num];
- unsigned long flags;
if (!vport->enabled)
return;
esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
/* Mark this vport as disabled to discard new events */
- spin_lock_irqsave(&vport->lock, flags);
vport->enabled = false;
- vport->enabled_events = 0;
- spin_unlock_irqrestore(&vport->lock, flags);
+
+ synchronize_irq(mlx5_get_msix_vec(esw->dev, MLX5_EQ_VEC_ASYNC));
mlx5_modify_vport_admin_state(esw->dev,
MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
@@ -781,16 +1496,26 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
flush_workqueue(esw->work_queue);
/* Disable events from this vport */
arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
- /* We don't assume VFs will cleanup after themselves */
- esw_cleanup_vport(esw, vport_num);
+ mutex_lock(&esw->state_lock);
+ /* We don't assume VFs will clean up after themselves.
+ * Calling the vport change handler while the vport is disabled
+ * will clean up the vport resources.
+ */
+ esw_vport_change_handle_locked(vport);
+ vport->enabled_events = 0;
+ if (vport_num && esw->mode == SRIOV_LEGACY) {
+ esw_vport_disable_egress_acl(esw, vport);
+ esw_vport_disable_ingress_acl(esw, vport);
+ }
esw->enabled_vports--;
+ mutex_unlock(&esw->state_lock);
}
/* Public E-Switch API */
-int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs)
+int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
{
int err;
- int i;
+ int i, enabled_events;
if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
@@ -802,16 +1527,26 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs)
return -ENOTSUPP;
}
- esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d)\n", nvfs);
+ if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
+ esw_warn(esw->dev, "E-Switch ingress ACL is not supported by FW\n");
+
+ if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
+ esw_warn(esw->dev, "E-Switch engress ACL is not supported by FW\n");
+ esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode);
+ esw->mode = mode;
esw_disable_vport(esw, 0);
- err = esw_create_fdb_table(esw, nvfs + 1);
+ if (mode == SRIOV_LEGACY)
+ err = esw_create_legacy_fdb_table(esw, nvfs + 1);
+ else
+ err = esw_offloads_init(esw, nvfs + 1);
if (err)
goto abort;
+ enabled_events = (mode == SRIOV_LEGACY) ? SRIOV_VPORT_EVENTS : UC_ADDR_CHANGE;
for (i = 0; i <= nvfs; i++)
- esw_enable_vport(esw, i, SRIOV_VPORT_EVENTS);
+ esw_enable_vport(esw, i, enabled_events);
esw_info(esw->dev, "SRIOV enabled: active vports(%d)\n",
esw->enabled_vports);
@@ -819,25 +1554,38 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs)
abort:
esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
+ esw->mode = SRIOV_NONE;
return err;
}
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
{
+ struct esw_mc_addr *mc_promisc;
+ int nvports;
int i;
if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
return;
- esw_info(esw->dev, "disable SRIOV: active vports(%d)\n",
- esw->enabled_vports);
+ esw_info(esw->dev, "disable SRIOV: active vports(%d) mode(%d)\n",
+ esw->enabled_vports, esw->mode);
+
+ mc_promisc = esw->mc_promisc;
+ nvports = esw->enabled_vports;
for (i = 0; i < esw->total_vports; i++)
esw_disable_vport(esw, i);
- esw_destroy_fdb_table(esw);
+ if (mc_promisc && mc_promisc->uplink_rule)
+ mlx5_del_flow_rule(mc_promisc->uplink_rule);
+
+ if (esw->mode == SRIOV_LEGACY)
+ esw_destroy_legacy_fdb_table(esw);
+ else if (esw->mode == SRIOV_OFFLOADS)
+ esw_offloads_cleanup(esw, nvports);
+ esw->mode = SRIOV_NONE;
/* VPORT 0 (PF) must be re-enabled with the non-sriov configuration */
esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
}
@@ -845,7 +1593,8 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
int mlx5_eswitch_init(struct mlx5_core_dev *dev)
{
int l2_table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
- int total_vports = 1 + pci_sriov_get_totalvfs(dev->pdev);
+ int total_vports = MLX5_TOTAL_VPORTS(dev);
+ struct esw_mc_addr *mc_promisc;
struct mlx5_eswitch *esw;
int vport_num;
int err;
@@ -874,6 +1623,13 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
}
esw->l2_table.size = l2_table_size;
+ mc_promisc = kzalloc(sizeof(*mc_promisc), GFP_KERNEL);
+ if (!mc_promisc) {
+ err = -ENOMEM;
+ goto abort;
+ }
+ esw->mc_promisc = mc_promisc;
+
esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
if (!esw->work_queue) {
err = -ENOMEM;
@@ -887,6 +1643,16 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
goto abort;
}
+ esw->offloads.vport_reps =
+ kzalloc(total_vports * sizeof(struct mlx5_eswitch_rep),
+ GFP_KERNEL);
+ if (!esw->offloads.vport_reps) {
+ err = -ENOMEM;
+ goto abort;
+ }
+
+ mutex_init(&esw->state_lock);
+
for (vport_num = 0; vport_num < total_vports; vport_num++) {
struct mlx5_vport *vport = &esw->vports[vport_num];
@@ -894,11 +1660,11 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
vport->dev = dev;
INIT_WORK(&vport->vport_change_handler,
esw_vport_change_handler);
- spin_lock_init(&vport->lock);
}
esw->total_vports = total_vports;
esw->enabled_vports = 0;
+ esw->mode = SRIOV_NONE;
dev->priv.eswitch = esw;
esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
@@ -909,6 +1675,7 @@ abort:
destroy_workqueue(esw->work_queue);
kfree(esw->l2_table.bitmap);
kfree(esw->vports);
+ kfree(esw->offloads.vport_reps);
kfree(esw);
return err;
}
@@ -925,6 +1692,8 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
esw->dev->priv.eswitch = NULL;
destroy_workqueue(esw->work_queue);
kfree(esw->l2_table.bitmap);
+ kfree(esw->mc_promisc);
+ kfree(esw->offloads.vport_reps);
kfree(esw->vports);
kfree(esw);
}
@@ -942,10 +1711,8 @@ void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
}
vport = &esw->vports[vport_num];
- spin_lock(&vport->lock);
if (vport->enabled)
queue_work(esw->work_queue, &vport->vport_change_handler);
- spin_unlock(&vport->lock);
}
/* Vport Administration */
@@ -953,9 +1720,23 @@ void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
(esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev))
#define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)
+static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
+{
+ ((u8 *)node_guid)[7] = mac[0];
+ ((u8 *)node_guid)[6] = mac[1];
+ ((u8 *)node_guid)[5] = mac[2];
+ ((u8 *)node_guid)[4] = 0xff;
+ ((u8 *)node_guid)[3] = 0xfe;
+ ((u8 *)node_guid)[2] = mac[3];
+ ((u8 *)node_guid)[1] = mac[4];
+ ((u8 *)node_guid)[0] = mac[5];
+}
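
node_guid_gen_from_mac derives an EUI-64-style identifier by splitting the MAC and inserting ff:fe in the middle; the byte-by-byte stores account for the u64 being assembled in little-endian memory order. A self-contained check of the mapping (hypothetical MAC value, assumes a little-endian host):

    #include <stdio.h>
    #include <stdint.h>

    static void node_guid_gen_from_mac(uint64_t *node_guid, const uint8_t mac[6])
    {
    	((uint8_t *)node_guid)[7] = mac[0];
    	((uint8_t *)node_guid)[6] = mac[1];
    	((uint8_t *)node_guid)[5] = mac[2];
    	((uint8_t *)node_guid)[4] = 0xff;
    	((uint8_t *)node_guid)[3] = 0xfe;
    	((uint8_t *)node_guid)[2] = mac[3];
    	((uint8_t *)node_guid)[1] = mac[4];
    	((uint8_t *)node_guid)[0] = mac[5];
    }

    int main(void)
    {
    	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
    	uint64_t guid;

    	node_guid_gen_from_mac(&guid, mac);
    	/* prints 001122fffe334455 for the MAC 00:11:22:33:44:55 */
    	printf("%016llx\n", (unsigned long long)guid);
    	return 0;
    }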
+
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
int vport, u8 mac[ETH_ALEN])
{
+ struct mlx5_vport *evport;
+ u64 node_guid;
int err = 0;
if (!ESW_ALLOWED(esw))
@@ -963,6 +1744,15 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
if (!LEGAL_VPORT(esw, vport))
return -EINVAL;
+ evport = &esw->vports[vport];
+
+ if (evport->spoofchk && !is_valid_ether_addr(mac)) {
+ mlx5_core_warn(esw->dev,
+ "MAC invalidation is not allowed when spoofchk is on, vport(%d)\n",
+ vport);
+ return -EPERM;
+ }
+
err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
if (err) {
mlx5_core_warn(esw->dev,
@@ -971,6 +1761,17 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
return err;
}
+ node_guid_gen_from_mac(&node_guid, mac);
+ err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid);
+ if (err)
+ mlx5_core_warn(esw->dev,
+ "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
+ vport, err);
+
+ mutex_lock(&esw->state_lock);
+ if (evport->enabled && esw->mode == SRIOV_LEGACY)
+ err = esw_vport_ingress_config(esw, evport);
+ mutex_unlock(&esw->state_lock);
return err;
}
@@ -990,6 +1791,7 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
int vport, struct ifla_vf_info *ivi)
{
+ struct mlx5_vport *evport;
u16 vlan;
u8 qos;
@@ -998,6 +1800,8 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
if (!LEGAL_VPORT(esw, vport))
return -EINVAL;
+ evport = &esw->vports[vport];
+
memset(ivi, 0, sizeof(*ivi));
ivi->vf = vport - 1;
@@ -1008,7 +1812,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
query_esw_vport_cvlan(esw->dev, vport, &vlan, &qos);
ivi->vlan = vlan;
ivi->qos = qos;
- ivi->spoofchk = 0;
+ ivi->spoofchk = evport->spoofchk;
return 0;
}
@@ -1016,6 +1820,8 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
int vport, u16 vlan, u8 qos)
{
+ struct mlx5_vport *evport;
+ int err = 0;
int set = 0;
if (!ESW_ALLOWED(esw))
@@ -1026,7 +1832,73 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
if (vlan || qos)
set = 1;
- return modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set);
+ evport = &esw->vports[vport];
+
+ err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set);
+ if (err)
+ return err;
+
+ mutex_lock(&esw->state_lock);
+ evport->vlan = vlan;
+ evport->qos = qos;
+ if (evport->enabled && esw->mode == SRIOV_LEGACY) {
+ err = esw_vport_ingress_config(esw, evport);
+ if (err)
+ goto out;
+ err = esw_vport_egress_config(esw, evport);
+ }
+
+out:
+ mutex_unlock(&esw->state_lock);
+ return err;
+}
+
+int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
+ int vport, bool spoofchk)
+{
+ struct mlx5_vport *evport;
+ bool pschk;
+ int err = 0;
+
+ if (!ESW_ALLOWED(esw))
+ return -EPERM;
+ if (!LEGAL_VPORT(esw, vport))
+ return -EINVAL;
+
+ evport = &esw->vports[vport];
+
+ mutex_lock(&esw->state_lock);
+ pschk = evport->spoofchk;
+ evport->spoofchk = spoofchk;
+ if (evport->enabled && esw->mode == SRIOV_LEGACY) {
+ err = esw_vport_ingress_config(esw, evport);
+ if (err)
+ evport->spoofchk = pschk;
+ }
+ mutex_unlock(&esw->state_lock);
+
+ return err;
+}
+
+int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
+ int vport, bool setting)
+{
+ struct mlx5_vport *evport;
+
+ if (!ESW_ALLOWED(esw))
+ return -EPERM;
+ if (!LEGAL_VPORT(esw, vport))
+ return -EINVAL;
+
+ evport = &esw->vports[vport];
+
+ mutex_lock(&esw->state_lock);
+ evport->trusted = setting;
+ if (evport->enabled)
+ esw_vport_change_handle_locked(evport);
+ mutex_unlock(&esw->state_lock);
+
+ return 0;
}
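
Because esw_update_vport_rx_mode zeroes every promisc request for untrusted (or disabled) vports, flipping trust only needs to re-run the locked change handler; the regular sync path then installs or tears down the rx-mode rules. This is the e-switch side of the host's "ip link set <pf> vf <n> trust on|off" knob. The effective behaviour, as a sketch:

    /* Effect of the trust bit on a vport's requested rx modes (sketch):
     *
     *   trusted  VF requests          rules installed
     *   -------  ------------------   -------------------------------------
     *   no       anything             none - requests are masked to zero
     *   yes      allmulti             allmulti_rule (+ shared uplink rule)
     *   yes      promisc              promisc_rule + the allmulti rules
     */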
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 3416a428f70f..a96140971d77 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -35,6 +35,7 @@
#include <linux/if_ether.h>
#include <linux/if_link.h>
+#include <net/devlink.h>
#include <linux/mlx5/device.h>
#define MLX5_MAX_UC_PER_VPORT(dev) \
@@ -46,6 +47,8 @@
#define MLX5_L2_ADDR_HASH_SIZE (BIT(BITS_PER_BYTE))
#define MLX5_L2_ADDR_HASH(addr) (addr[5])
+#define FDB_UPLINK_VPORT 0xffff
+
/* L2 -mac address based- hash helpers */
struct l2addr_node {
struct hlist_node hlist;
@@ -88,18 +91,40 @@ struct l2addr_node {
kfree(ptr); \
})
+struct vport_ingress {
+ struct mlx5_flow_table *acl;
+ struct mlx5_flow_group *allow_untagged_spoofchk_grp;
+ struct mlx5_flow_group *allow_spoofchk_only_grp;
+ struct mlx5_flow_group *allow_untagged_only_grp;
+ struct mlx5_flow_group *drop_grp;
+ struct mlx5_flow_rule *allow_rule;
+ struct mlx5_flow_rule *drop_rule;
+};
+
+struct vport_egress {
+ struct mlx5_flow_table *acl;
+ struct mlx5_flow_group *allowed_vlans_grp;
+ struct mlx5_flow_group *drop_grp;
+ struct mlx5_flow_rule *allowed_vlan;
+ struct mlx5_flow_rule *drop_rule;
+};
+
struct mlx5_vport {
struct mlx5_core_dev *dev;
int vport;
struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE];
struct hlist_head mc_list[MLX5_L2_ADDR_HASH_SIZE];
+ struct mlx5_flow_rule *promisc_rule;
+ struct mlx5_flow_rule *allmulti_rule;
struct work_struct vport_change_handler;
- /* This spinlock protects access to vport data, between
- * "esw_vport_disable" and ongoing interrupt "mlx5_eswitch_vport_event"
- * once vport marked as disabled new interrupts are discarded.
- */
- spinlock_t lock; /* vport events sync */
+ struct vport_ingress ingress;
+ struct vport_egress egress;
+
+ u16 vlan;
+ u8 qos;
+ bool spoofchk;
+ bool trusted;
bool enabled;
u16 enabled_events;
};
@@ -112,7 +137,50 @@ struct mlx5_l2_table {
struct mlx5_eswitch_fdb {
void *fdb;
- struct mlx5_flow_group *addr_grp;
+ union {
+ struct legacy_fdb {
+ struct mlx5_flow_group *addr_grp;
+ struct mlx5_flow_group *allmulti_grp;
+ struct mlx5_flow_group *promisc_grp;
+ } legacy;
+
+ struct offloads_fdb {
+ struct mlx5_flow_table *fdb;
+ struct mlx5_flow_group *send_to_vport_grp;
+ struct mlx5_flow_group *miss_grp;
+ struct mlx5_flow_rule *miss_rule;
+ } offloads;
+ };
+};
+
+enum {
+ SRIOV_NONE,
+ SRIOV_LEGACY,
+ SRIOV_OFFLOADS
+};
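
This enum keys the fdb_table union above: at most one FDB flavour exists at a time, chosen when SRIOV is enabled via mlx5_eswitch_enable_sriov(esw, nvfs, mode) and exposed through the devlink eswitch mode hooks declared below. A sketch of the mapping:

    /* E-Switch mode vs. FDB flavour (sketch):
     *
     *   SRIOV_NONE     - SRIOV off; only vport 0 (PF) enabled, no FDB
     *   SRIOV_LEGACY   - fdb_table.legacy:   L2 address based switching
     *   SRIOV_OFFLOADS - fdb_table.offloads: flow based switching through
     *                    per-vport representors
     */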
+
+struct mlx5_esw_sq {
+ struct mlx5_flow_rule *send_to_vport_rule;
+ struct list_head list;
+};
+
+struct mlx5_eswitch_rep {
+ int (*load)(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep);
+ void (*unload)(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep);
+ u16 vport;
+ struct mlx5_flow_rule *vport_rx_rule;
+ void *priv_data;
+ struct list_head vport_sqs_list;
+ bool valid;
+ u8 hw_id[ETH_ALEN];
+};
+
+struct mlx5_esw_offload {
+ struct mlx5_flow_table *ft_offloads;
+ struct mlx5_flow_group *vport_rx_group;
+ struct mlx5_eswitch_rep *vport_reps;
};
struct mlx5_eswitch {
@@ -124,13 +192,20 @@ struct mlx5_eswitch {
struct mlx5_vport *vports;
int total_vports;
int enabled_vports;
+ /* Synchronize between vport change events
+ * and async SRIOV admin state changes
+ */
+ struct mutex state_lock;
+ struct esw_mc_addr *mc_promisc;
+ struct mlx5_esw_offload offloads;
+ int mode;
};
/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe);
-int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs);
+int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode);
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw);
int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
int vport, u8 mac[ETH_ALEN]);
@@ -138,10 +213,46 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
int vport, int link_state);
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
int vport, u16 vlan, u8 qos);
+int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
+ int vport, bool spoofchk);
+int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
+ int vport_num, bool setting);
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
int vport, struct ifla_vf_info *ivi);
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
int vport,
struct ifla_vf_stats *vf_stats);
+struct mlx5_flow_spec;
+
+struct mlx5_flow_rule *
+mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
+ struct mlx5_flow_spec *spec,
+ u32 action, u32 src_vport, u32 dst_vport);
+struct mlx5_flow_rule *
+mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn);
+
+int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep,
+ u16 *sqns_array, int sqns_num);
+void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep);
+
+int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode);
+int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode);
+void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep);
+void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
+ int vport);
+
+#define MLX5_DEBUG_ESWITCH_MASK BIT(3)
+
+#define esw_info(dev, format, ...) \
+ pr_info("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)
+
+#define esw_warn(dev, format, ...) \
+ pr_warn("(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)
+
+#define esw_debug(dev, format, ...) \
+ mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)
#endif /* __MLX5_ESWITCH_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
new file mode 100644
index 000000000000..7de40e6b0c25
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -0,0 +1,646 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/mlx5_ifc.h>
+#include <linux/mlx5/vport.h>
+#include <linux/mlx5/fs.h>
+#include "mlx5_core.h"
+#include "eswitch.h"
+
+enum {
+ FDB_FAST_PATH = 0,
+ FDB_SLOW_PATH
+};
+
+struct mlx5_flow_rule *
+mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
+ struct mlx5_flow_spec *spec,
+ u32 action, u32 src_vport, u32 dst_vport)
+{
+ struct mlx5_flow_destination dest = { 0 };
+ struct mlx5_fc *counter = NULL;
+ struct mlx5_flow_rule *rule;
+ void *misc;
+
+ if (esw->mode != SRIOV_OFFLOADS)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest.vport_num = dst_vport;
+ action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ } else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ counter = mlx5_fc_create(esw->dev, true);
+ if (IS_ERR(counter))
+ return ERR_CAST(counter);
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter = counter;
+ }
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
+ MLX5_SET(fte_match_set_misc, misc, source_port, src_vport);
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
+ MLX5_MATCH_MISC_PARAMETERS;
+
+ rule = mlx5_add_flow_rule((struct mlx5_flow_table *)esw->fdb_table.fdb,
+ spec, action, 0, &dest);
+
+ if (IS_ERR(rule))
+ mlx5_fc_destroy(esw->dev, counter);
+
+ return rule;
+}
+
+static struct mlx5_flow_rule *
+mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
+{
+ struct mlx5_flow_destination dest;
+ struct mlx5_flow_rule *flow_rule;
+ struct mlx5_flow_spec *spec;
+ void *misc;
+
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec) {
+ esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
+ flow_rule = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
+ MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
+ MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest.vport_num = vport;
+
+ flow_rule = mlx5_add_flow_rule(esw->fdb_table.offloads.fdb, spec,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ 0, &dest);
+ if (IS_ERR(flow_rule))
+ esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
+out:
+ kvfree(spec);
+ return flow_rule;
+}
+
+void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep)
+{
+ struct mlx5_esw_sq *esw_sq, *tmp;
+
+ if (esw->mode != SRIOV_OFFLOADS)
+ return;
+
+ list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
+ mlx5_del_flow_rule(esw_sq->send_to_vport_rule);
+ list_del(&esw_sq->list);
+ kfree(esw_sq);
+ }
+}
+
+int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep,
+ u16 *sqns_array, int sqns_num)
+{
+ struct mlx5_flow_rule *flow_rule;
+ struct mlx5_esw_sq *esw_sq;
+ int vport;
+ int err;
+ int i;
+
+ if (esw->mode != SRIOV_OFFLOADS)
+ return 0;
+
+ vport = rep->vport == 0 ?
+ FDB_UPLINK_VPORT : rep->vport;
+
+ for (i = 0; i < sqns_num; i++) {
+ esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
+ if (!esw_sq) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ /* Add re-inject rule to the PF/representor sqs */
+ flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
+ vport,
+ sqns_array[i]);
+ if (IS_ERR(flow_rule)) {
+ err = PTR_ERR(flow_rule);
+ kfree(esw_sq);
+ goto out_err;
+ }
+ esw_sq->send_to_vport_rule = flow_rule;
+ list_add(&esw_sq->list, &rep->vport_sqs_list);
+ }
+ return 0;
+
+out_err:
+ mlx5_eswitch_sqs2vport_stop(esw, rep);
+ return err;
+}
+
+static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
+{
+ struct mlx5_flow_destination dest;
+ struct mlx5_flow_rule *flow_rule = NULL;
+ struct mlx5_flow_spec *spec;
+ int err = 0;
+
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec) {
+ esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
+ dest.vport_num = 0;
+
+ flow_rule = mlx5_add_flow_rule(esw->fdb_table.offloads.fdb, spec,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ 0, &dest);
+ if (IS_ERR(flow_rule)) {
+ err = PTR_ERR(flow_rule);
+ esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err);
+ goto out;
+ }
+
+ esw->fdb_table.offloads.miss_rule = flow_rule;
+out:
+ kvfree(spec);
+ return err;
+}
+
+#define MAX_PF_SQ 256
+#define ESW_OFFLOADS_NUM_ENTRIES (1 << 13) /* 8K */
+#define ESW_OFFLOADS_NUM_GROUPS 4
+
+static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_core_dev *dev = esw->dev;
+ struct mlx5_flow_namespace *root_ns;
+ struct mlx5_flow_table *fdb = NULL;
+ struct mlx5_flow_group *g;
+ u32 *flow_group_in;
+ void *match_criteria;
+ int table_size, ix, err = 0;
+
+ flow_group_in = mlx5_vzalloc(inlen);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
+ if (!root_ns) {
+ esw_warn(dev, "Failed to get FDB flow namespace\n");
+ goto ns_err;
+ }
+
+ esw_debug(dev, "Create offloads FDB table, log_max_size(%d)\n",
+ MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
+
+ fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
+ ESW_OFFLOADS_NUM_ENTRIES,
+ ESW_OFFLOADS_NUM_GROUPS, 0);
+ if (IS_ERR(fdb)) {
+ err = PTR_ERR(fdb);
+ esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
+ goto fast_fdb_err;
+ }
+ esw->fdb_table.fdb = fdb;
+
+ table_size = nvports + MAX_PF_SQ + 1;
+ fdb = mlx5_create_flow_table(root_ns, FDB_SLOW_PATH, table_size, 0);
+ if (IS_ERR(fdb)) {
+ err = PTR_ERR(fdb);
+ esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
+ goto slow_fdb_err;
+ }
+ esw->fdb_table.offloads.fdb = fdb;
+
+ /* create send-to-vport group */
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS);
+
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
+
+ ix = nvports + MAX_PF_SQ;
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);
+
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
+ goto send_vport_err;
+ }
+ esw->fdb_table.offloads.send_to_vport_grp = g;
+
+ /* create miss group */
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 0);
+
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 1);
+
+ g = mlx5_create_flow_group(fdb, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
+ goto miss_err;
+ }
+ esw->fdb_table.offloads.miss_grp = g;
+
+ err = esw_add_fdb_miss_rule(esw);
+ if (err)
+ goto miss_rule_err;
+
+ return 0;
+
+miss_rule_err:
+ mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
+miss_err:
+ mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
+send_vport_err:
+ mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
+slow_fdb_err:
+ mlx5_destroy_flow_table(esw->fdb_table.fdb);
+fast_fdb_err:
+ns_err:
+ kvfree(flow_group_in);
+ return err;
+}
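/* Worked example of the slow path layout above, with illustrative
 * nvports = 4: table_size = 4 + 256 + 1 = 261 (rounded up to 512 by
 * table creation), ix = 260, so the send-to-vport group spans flow
 * indexes 0..259 and the miss group covers indexes 260..261.
 */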
+
+static void esw_destroy_offloads_fdb_table(struct mlx5_eswitch *esw)
+{
+ if (!esw->fdb_table.fdb)
+ return;
+
+ esw_debug(esw->dev, "Destroy offloads FDB Table\n");
+ mlx5_del_flow_rule(esw->fdb_table.offloads.miss_rule);
+ mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
+ mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
+
+ mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
+ mlx5_destroy_flow_table(esw->fdb_table.fdb);
+}
+
+static int esw_create_offloads_table(struct mlx5_eswitch *esw)
+{
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_table *ft_offloads;
+ struct mlx5_core_dev *dev = esw->dev;
+ int err = 0;
+
+ ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
+ if (!ns) {
+ esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
+ return -EOPNOTSUPP;
+ }
+
+ ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0);
+ if (IS_ERR(ft_offloads)) {
+ err = PTR_ERR(ft_offloads);
+ esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
+ return err;
+ }
+
+ esw->offloads.ft_offloads = ft_offloads;
+ return 0;
+}
+
+static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
+{
+ struct mlx5_esw_offload *offloads = &esw->offloads;
+
+ mlx5_destroy_flow_table(offloads->ft_offloads);
+}
+
+static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *g;
+ struct mlx5_priv *priv = &esw->dev->priv;
+ u32 *flow_group_in;
+ void *match_criteria, *misc;
+ int err = 0;
+ int nvports = priv->sriov.num_vfs + 2;
+
+ flow_group_in = mlx5_vzalloc(inlen);
+ if (!flow_group_in)
+ return -ENOMEM;
+
+ /* create vport rx group */
+ memset(flow_group_in, 0, inlen);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_MISC_PARAMETERS);
+
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
+ misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);
+
+ g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
+
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
+ goto out;
+ }
+
+ esw->offloads.vport_rx_group = g;
+out:
+ kvfree(flow_group_in);
+ return err;
+}
+
+static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
+{
+ mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
+}
+
+struct mlx5_flow_rule *
+mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
+{
+ struct mlx5_flow_destination dest;
+ struct mlx5_flow_rule *flow_rule;
+ struct mlx5_flow_spec *spec;
+ void *misc;
+
+ spec = mlx5_vzalloc(sizeof(*spec));
+ if (!spec) {
+ esw_warn(esw->dev, "Failed to alloc match parameters\n");
+ flow_rule = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
+ MLX5_SET(fte_match_set_misc, misc, source_port, vport);
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
+ dest.tir_num = tirn;
+
+ flow_rule = mlx5_add_flow_rule(esw->offloads.ft_offloads, spec,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
+ 0, &dest);
+ if (IS_ERR(flow_rule)) {
+ esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
+ goto out;
+ }
+
+out:
+ kvfree(spec);
+ return flow_rule;
+}
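/* A usage sketch (illustrative, not part of this patch): a representor
 * steers traffic received on behalf of its vport into one of its own
 * TIRs; "tirn" would come from the representor's RQ/TIR setup.
 */
static int my_rep_rx_steer(struct mlx5_eswitch *esw, int vport, u32 tirn)
{
	struct mlx5_flow_rule *rule;

	rule = mlx5_eswitch_create_vport_rx_rule(esw, vport, tirn);
	if (IS_ERR(rule))
		return PTR_ERR(rule);

	/* the caller keeps "rule" and removes it with mlx5_del_flow_rule() */
	return 0;
}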
+
+static int esw_offloads_start(struct mlx5_eswitch *esw)
+{
+ int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
+
+ if (esw->mode != SRIOV_LEGACY) {
+ esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
+ return -EINVAL;
+ }
+
+ mlx5_eswitch_disable_sriov(esw);
+ err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
+ if (err) {
+ esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
+ err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
+ if (err1)
+ esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err);
+ }
+ return err;
+}
+
+int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
+{
+ struct mlx5_eswitch_rep *rep;
+ int vport;
+ int err;
+
+ err = esw_create_offloads_fdb_table(esw, nvports);
+ if (err)
+ return err;
+
+ err = esw_create_offloads_table(esw);
+ if (err)
+ goto create_ft_err;
+
+ err = esw_create_vport_rx_group(esw);
+ if (err)
+ goto create_fg_err;
+
+ for (vport = 0; vport < nvports; vport++) {
+ rep = &esw->offloads.vport_reps[vport];
+ if (!rep->valid)
+ continue;
+
+ err = rep->load(esw, rep);
+ if (err)
+ goto err_reps;
+ }
+ return 0;
+
+err_reps:
+ for (vport--; vport >= 0; vport--) {
+ rep = &esw->offloads.vport_reps[vport];
+ if (!rep->valid)
+ continue;
+ rep->unload(esw, rep);
+ }
+ esw_destroy_vport_rx_group(esw);
+
+create_fg_err:
+ esw_destroy_offloads_table(esw);
+
+create_ft_err:
+ esw_destroy_offloads_fdb_table(esw);
+ return err;
+}
+
+static int esw_offloads_stop(struct mlx5_eswitch *esw)
+{
+ int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
+
+ mlx5_eswitch_disable_sriov(esw);
+ err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
+ if (err) {
+ esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
+ err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
+ if (err1)
+ esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err);
+ }
+
+ return err;
+}
+
+void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
+{
+ struct mlx5_eswitch_rep *rep;
+ int vport;
+
+ for (vport = 0; vport < nvports; vport++) {
+ rep = &esw->offloads.vport_reps[vport];
+ if (!rep->valid)
+ continue;
+ rep->unload(esw, rep);
+ }
+
+ esw_destroy_vport_rx_group(esw);
+ esw_destroy_offloads_table(esw);
+ esw_destroy_offloads_fdb_table(esw);
+}
+
+static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
+{
+ switch (mode) {
+ case DEVLINK_ESWITCH_MODE_LEGACY:
+ *mlx5_mode = SRIOV_LEGACY;
+ break;
+ case DEVLINK_ESWITCH_MODE_SWITCHDEV:
+ *mlx5_mode = SRIOV_OFFLOADS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
+{
+ switch (mlx5_mode) {
+ case SRIOV_LEGACY:
+ *mode = DEVLINK_ESWITCH_MODE_LEGACY;
+ break;
+ case SRIOV_OFFLOADS:
+ *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
+{
+ struct mlx5_core_dev *dev;
+ u16 cur_mlx5_mode, mlx5_mode = 0;
+
+ dev = devlink_priv(devlink);
+
+ if (!MLX5_CAP_GEN(dev, vport_group_manager))
+ return -EOPNOTSUPP;
+
+ cur_mlx5_mode = dev->priv.eswitch->mode;
+
+ if (cur_mlx5_mode == SRIOV_NONE)
+ return -EOPNOTSUPP;
+
+ if (esw_mode_from_devlink(mode, &mlx5_mode))
+ return -EINVAL;
+
+ if (cur_mlx5_mode == mlx5_mode)
+ return 0;
+
+ if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
+ return esw_offloads_start(dev->priv.eswitch);
+ else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
+ return esw_offloads_stop(dev->priv.eswitch);
+ else
+ return -EINVAL;
+}
+
+int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
+{
+ struct mlx5_core_dev *dev;
+
+ dev = devlink_priv(devlink);
+
+ if (!MLX5_CAP_GEN(dev, vport_group_manager))
+ return -EOPNOTSUPP;
+
+ if (dev->priv.eswitch->mode == SRIOV_NONE)
+ return -EOPNOTSUPP;
+
+ return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
+}
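/* For reference, assuming these ops are wired into the driver's devlink_ops,
 * the mode mapping above is what the iproute2 "devlink" tool drives (the PCI
 * address is illustrative):
 *
 *   devlink dev eswitch set pci/0000:05:00.0 mode switchdev  -> SRIOV_OFFLOADS
 *   devlink dev eswitch set pci/0000:05:00.0 mode legacy     -> SRIOV_LEGACY
 *   devlink dev eswitch show pci/0000:05:00.0
 */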
+
+void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch_rep *rep)
+{
+ struct mlx5_esw_offload *offloads = &esw->offloads;
+
+ memcpy(&offloads->vport_reps[rep->vport], rep,
+ sizeof(struct mlx5_eswitch_rep));
+
+ INIT_LIST_HEAD(&offloads->vport_reps[rep->vport].vport_sqs_list);
+ offloads->vport_reps[rep->vport].valid = true;
+}
+
+void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
+ int vport)
+{
+ struct mlx5_esw_offload *offloads = &esw->offloads;
+ struct mlx5_eswitch_rep *rep;
+
+ rep = &offloads->vport_reps[vport];
+
+ if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport].enabled)
+ rep->unload(esw, rep);
+
+ offloads->vport_reps[vport].valid = false;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index f46f1db0fc00..287ade151ec8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -50,6 +50,10 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);
MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
+ if (ft->vport) {
+ MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
+ MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
+ }
memset(out, 0, sizeof(out));
return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
@@ -57,6 +61,7 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
}
int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
+ u16 vport,
enum fs_flow_table_type type, unsigned int level,
unsigned int log_size, struct mlx5_flow_table
*next_ft, unsigned int *table_id)
@@ -77,6 +82,10 @@ int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
MLX5_SET(create_flow_table_in, in, table_type, type);
MLX5_SET(create_flow_table_in, in, level, level);
MLX5_SET(create_flow_table_in, in, log_size, log_size);
+ if (vport) {
+ MLX5_SET(create_flow_table_in, in, vport_number, vport);
+ MLX5_SET(create_flow_table_in, in, other_vport, 1);
+ }
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
@@ -101,6 +110,10 @@ int mlx5_cmd_destroy_flow_table(struct mlx5_core_dev *dev,
MLX5_CMD_OP_DESTROY_FLOW_TABLE);
MLX5_SET(destroy_flow_table_in, in, table_type, ft->type);
MLX5_SET(destroy_flow_table_in, in, table_id, ft->id);
+ if (ft->vport) {
+ MLX5_SET(destroy_flow_table_in, in, vport_number, ft->vport);
+ MLX5_SET(destroy_flow_table_in, in, other_vport, 1);
+ }
return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
sizeof(out));
@@ -120,6 +133,10 @@ int mlx5_cmd_modify_flow_table(struct mlx5_core_dev *dev,
MLX5_CMD_OP_MODIFY_FLOW_TABLE);
MLX5_SET(modify_flow_table_in, in, table_type, ft->type);
MLX5_SET(modify_flow_table_in, in, table_id, ft->id);
+ if (ft->vport) {
+ MLX5_SET(modify_flow_table_in, in, vport_number, ft->vport);
+ MLX5_SET(modify_flow_table_in, in, other_vport, 1);
+ }
MLX5_SET(modify_flow_table_in, in, modify_field_select,
MLX5_MODIFY_FLOW_TABLE_MISS_TABLE_ID);
if (next_ft) {
@@ -148,6 +165,10 @@ int mlx5_cmd_create_flow_group(struct mlx5_core_dev *dev,
MLX5_CMD_OP_CREATE_FLOW_GROUP);
MLX5_SET(create_flow_group_in, in, table_type, ft->type);
MLX5_SET(create_flow_group_in, in, table_id, ft->id);
+ if (ft->vport) {
+ MLX5_SET(create_flow_group_in, in, vport_number, ft->vport);
+ MLX5_SET(create_flow_group_in, in, other_vport, 1);
+ }
err = mlx5_cmd_exec_check_status(dev, in,
inlen, out,
@@ -174,6 +195,10 @@ int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev,
MLX5_SET(destroy_flow_group_in, in, table_type, ft->type);
MLX5_SET(destroy_flow_group_in, in, table_id, ft->id);
MLX5_SET(destroy_flow_group_in, in, group_id, group_id);
+ if (ft->vport) {
+ MLX5_SET(destroy_flow_group_in, in, vport_number, ft->vport);
+ MLX5_SET(destroy_flow_group_in, in, other_vport, 1);
+ }
return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
sizeof(out));
@@ -207,22 +232,29 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(set_fte_in, in, table_type, ft->type);
MLX5_SET(set_fte_in, in, table_id, ft->id);
MLX5_SET(set_fte_in, in, flow_index, fte->index);
+ if (ft->vport) {
+ MLX5_SET(set_fte_in, in, vport_number, ft->vport);
+ MLX5_SET(set_fte_in, in, other_vport, 1);
+ }
in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
MLX5_SET(flow_context, in_flow_context, group_id, group_id);
MLX5_SET(flow_context, in_flow_context, flow_tag, fte->flow_tag);
MLX5_SET(flow_context, in_flow_context, action, fte->action);
- MLX5_SET(flow_context, in_flow_context, destination_list_size,
- fte->dests_size);
in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
match_value);
memcpy(in_match_value, &fte->val, MLX5_ST_SZ_BYTES(fte_match_param));
+ in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
if (fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
- in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
+ int list_size = 0;
+
list_for_each_entry(dst, &fte->node.children, node.list) {
unsigned int id;
+ if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ continue;
+
MLX5_SET(dest_format_struct, in_dests, destination_type,
dst->dest_attr.type);
if (dst->dest_attr.type ==
@@ -233,8 +265,31 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
}
MLX5_SET(dest_format_struct, in_dests, destination_id, id);
in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
+ list_size++;
}
+
+ MLX5_SET(flow_context, in_flow_context, destination_list_size,
+ list_size);
}
+
+ if (fte->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+ int list_size = 0;
+
+ list_for_each_entry(dst, &fte->node.children, node.list) {
+ if (dst->dest_attr.type !=
+ MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ continue;
+
+ MLX5_SET(flow_counter_list, in_dests, flow_counter_id,
+ dst->dest_attr.counter->id);
+ in_dests += MLX5_ST_SZ_BYTES(dest_format_struct);
+ list_size++;
+ }
+
+ MLX5_SET(flow_context, in_flow_context, flow_counter_list_size,
+ list_size);
+ }
+
memset(out, 0, sizeof(out));
err = mlx5_cmd_exec_check_status(dev, in, inlen, out,
sizeof(out));
@@ -254,18 +309,16 @@ int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
unsigned group_id,
+ int modify_mask,
struct fs_fte *fte)
{
int opmod;
- int modify_mask;
int atomic_mod_cap = MLX5_CAP_FLOWTABLE(dev,
flow_table_properties_nic_receive.
flow_modify_en);
if (!atomic_mod_cap)
return -ENOTSUPP;
opmod = 1;
- modify_mask = 1 <<
- MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST;
return mlx5_cmd_set_fte(dev, opmod, modify_mask, ft, group_id, fte);
}
@@ -285,8 +338,145 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
MLX5_SET(delete_fte_in, in, table_type, ft->type);
MLX5_SET(delete_fte_in, in, table_id, ft->id);
MLX5_SET(delete_fte_in, in, flow_index, index);
+ if (ft->vport) {
+ MLX5_SET(delete_fte_in, in, vport_number, ft->vport);
+ MLX5_SET(delete_fte_in, in, other_vport, 1);
+ }
err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
return err;
}
+
+int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id)
+{
+ u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)];
+ u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(alloc_flow_counter_in, in, opcode,
+ MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
+
+ err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+ sizeof(out));
+ if (err)
+ return err;
+
+ *id = MLX5_GET(alloc_flow_counter_out, out, flow_counter_id);
+
+ return 0;
+}
+
+int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)];
+ u32 out[MLX5_ST_SZ_DW(dealloc_flow_counter_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(dealloc_flow_counter_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
+ MLX5_SET(dealloc_flow_counter_in, in, flow_counter_id, id);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+ sizeof(out));
+}
+
+int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id,
+ u64 *packets, u64 *bytes)
+{
+ u32 out[MLX5_ST_SZ_BYTES(query_flow_counter_out) +
+ MLX5_ST_SZ_BYTES(traffic_counter)];
+ u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)];
+ void *stats;
+ int err = 0;
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(query_flow_counter_in, in, opcode,
+ MLX5_CMD_OP_QUERY_FLOW_COUNTER);
+ MLX5_SET(query_flow_counter_in, in, op_mod, 0);
+ MLX5_SET(query_flow_counter_in, in, flow_counter_id, id);
+
+ err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ if (err)
+ return err;
+
+ stats = MLX5_ADDR_OF(query_flow_counter_out, out, flow_statistics);
+ *packets = MLX5_GET64(traffic_counter, stats, packets);
+ *bytes = MLX5_GET64(traffic_counter, stats, octets);
+
+ return 0;
+}
+
+struct mlx5_cmd_fc_bulk {
+ u16 id;
+ int num;
+ int outlen;
+ u32 out[0];
+};
+
+struct mlx5_cmd_fc_bulk *
+mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u16 id, int num)
+{
+ struct mlx5_cmd_fc_bulk *b;
+ int outlen =
+ MLX5_ST_SZ_BYTES(query_flow_counter_out) +
+ MLX5_ST_SZ_BYTES(traffic_counter) * num;
+
+ b = kzalloc(sizeof(*b) + outlen, GFP_KERNEL);
+ if (!b)
+ return NULL;
+
+ b->id = id;
+ b->num = num;
+ b->outlen = outlen;
+
+ return b;
+}
+
+void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b)
+{
+ kfree(b);
+}
+
+int
+mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b)
+{
+ u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(query_flow_counter_in, in, opcode,
+ MLX5_CMD_OP_QUERY_FLOW_COUNTER);
+ MLX5_SET(query_flow_counter_in, in, op_mod, 0);
+ MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id);
+ MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
+ b->out, b->outlen);
+}
+
+void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
+ struct mlx5_cmd_fc_bulk *b, u16 id,
+ u64 *packets, u64 *bytes)
+{
+ int index = id - b->id;
+ void *stats;
+
+ if (index < 0 || index >= b->num) {
+ mlx5_core_warn(dev, "Flow counter id (0x%x) out of range (0x%x..0x%x). Counter ignored.\n",
+ id, b->id, b->id + b->num - 1);
+ return;
+ }
+
+ stats = MLX5_ADDR_OF(query_flow_counter_out, b->out,
+ flow_statistics[index]);
+ *packets = MLX5_GET64(traffic_counter, stats, packets);
+ *bytes = MLX5_GET64(traffic_counter, stats, octets);
+}
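/* A minimal sketch of the intended bulk-query flow (illustrative caller,
 * not part of this patch): allocate a buffer covering "num" counters
 * starting at "id", issue one QUERY_FLOW_COUNTER command, then pick out
 * individual results.
 */
static int my_bulk_read(struct mlx5_core_dev *dev, u16 id, int num)
{
	struct mlx5_cmd_fc_bulk *b;
	u64 packets, bytes;
	int err;

	/* "id" assumed 4-aligned and "num" a multiple of 4, as the
	 * firmware bulk query expects
	 */
	b = mlx5_cmd_fc_bulk_alloc(dev, id, num);
	if (!b)
		return -ENOMEM;

	err = mlx5_cmd_fc_bulk_query(dev, b);
	if (!err)
		mlx5_cmd_fc_bulk_get(dev, b, id, &packets, &bytes);

	mlx5_cmd_fc_bulk_free(b);
	return err;
}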
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index 9814d4784803..158844cef82b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -34,6 +34,7 @@
#define _MLX5_FS_CMD_
int mlx5_cmd_create_flow_table(struct mlx5_core_dev *dev,
+ u16 vport,
enum fs_flow_table_type type, unsigned int level,
unsigned int log_size, struct mlx5_flow_table
*next_ft, unsigned int *table_id);
@@ -61,6 +62,7 @@ int mlx5_cmd_create_fte(struct mlx5_core_dev *dev,
int mlx5_cmd_update_fte(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft,
unsigned group_id,
+ int modify_mask,
struct fs_fte *fte);
int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
@@ -69,4 +71,21 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
struct mlx5_flow_table *ft);
+
+int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u16 *id);
+int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u16 id);
+int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u16 id,
+ u64 *packets, u64 *bytes);
+
+struct mlx5_cmd_fc_bulk;
+
+struct mlx5_cmd_fc_bulk *
+mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u16 id, int num);
+void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b);
+int
+mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b);
+void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
+ struct mlx5_cmd_fc_bulk *b, u16 id,
+ u64 *packets, u64 *bytes);
+
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 89cce97d46c6..3d6c1f65e586 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -40,18 +40,18 @@
#define INIT_TREE_NODE_ARRAY_SIZE(...) (sizeof((struct init_tree_node[]){__VA_ARGS__}) /\
sizeof(struct init_tree_node))
-#define ADD_PRIO(num_prios_val, min_level_val, max_ft_val, caps_val,\
+#define ADD_PRIO(num_prios_val, min_level_val, num_levels_val, caps_val,\
...) {.type = FS_TYPE_PRIO,\
.min_ft_level = min_level_val,\
- .max_ft = max_ft_val,\
+ .num_levels = num_levels_val,\
.num_leaf_prios = num_prios_val,\
.caps = caps_val,\
.children = (struct init_tree_node[]) {__VA_ARGS__},\
.ar_size = INIT_TREE_NODE_ARRAY_SIZE(__VA_ARGS__) \
}
-#define ADD_MULTIPLE_PRIO(num_prios_val, max_ft_val, ...)\
- ADD_PRIO(num_prios_val, 0, max_ft_val, {},\
+#define ADD_MULTIPLE_PRIO(num_prios_val, num_levels_val, ...)\
+ ADD_PRIO(num_prios_val, 0, num_levels_val, {},\
__VA_ARGS__)\
#define ADD_NS(...) {.type = FS_TYPE_NAMESPACE,\
@@ -67,19 +67,35 @@
#define FS_REQUIRED_CAPS(...) {.arr_sz = INIT_CAPS_ARRAY_SIZE(__VA_ARGS__), \
.caps = (long[]) {__VA_ARGS__} }
-#define LEFTOVERS_MAX_FT 1
+#define FS_CHAINING_CAPS FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en), \
+ FS_CAP(flow_table_properties_nic_receive.modify_root), \
+ FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode), \
+ FS_CAP(flow_table_properties_nic_receive.flow_table_modify))
+
+#define LEFTOVERS_NUM_LEVELS 1
#define LEFTOVERS_NUM_PRIOS 1
-#define BY_PASS_PRIO_MAX_FT 1
-#define BY_PASS_MIN_LEVEL (KENREL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
- LEFTOVERS_MAX_FT)
-#define KERNEL_MAX_FT 3
-#define KERNEL_NUM_PRIOS 2
-#define KENREL_MIN_LEVEL 2
+#define BY_PASS_PRIO_NUM_LEVELS 1
+#define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
+ LEFTOVERS_NUM_PRIOS)
+
+#define ETHTOOL_PRIO_NUM_LEVELS 1
+#define ETHTOOL_NUM_PRIOS 11
+#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
+/* Vlan, mac, ttc, aRFS */
+#define KERNEL_NIC_PRIO_NUM_LEVELS 4
+#define KERNEL_NIC_NUM_PRIOS 1
+/* One more level for tc */
+#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
-#define ANCHOR_MAX_FT 1
+#define ANCHOR_NUM_LEVELS 1
#define ANCHOR_NUM_PRIOS 1
#define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
+
+#define OFFLOADS_MAX_FT 1
+#define OFFLOADS_NUM_PRIOS 1
+#define OFFLOADS_MIN_LEVEL (ANCHOR_MIN_LEVEL + 1)
+
struct node_caps {
size_t arr_sz;
long *caps;
@@ -92,27 +108,30 @@ static struct init_tree_node {
int min_ft_level;
int num_leaf_prios;
int prio;
- int max_ft;
+ int num_levels;
} root_fs = {
.type = FS_TYPE_NAMESPACE,
- .ar_size = 4,
+ .ar_size = 6,
.children = (struct init_tree_node[]) {
ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
- FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en),
- FS_CAP(flow_table_properties_nic_receive.modify_root),
- FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode),
- FS_CAP(flow_table_properties_nic_receive.flow_table_modify)),
- ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, BY_PASS_PRIO_MAX_FT))),
- ADD_PRIO(0, KENREL_MIN_LEVEL, 0, {},
- ADD_NS(ADD_MULTIPLE_PRIO(KERNEL_NUM_PRIOS, KERNEL_MAX_FT))),
+ FS_CHAINING_CAPS,
+ ADD_NS(ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
+ BY_PASS_PRIO_NUM_LEVELS))),
+ ADD_PRIO(0, OFFLOADS_MIN_LEVEL, 0, {},
+ ADD_NS(ADD_MULTIPLE_PRIO(OFFLOADS_NUM_PRIOS, OFFLOADS_MAX_FT))),
+ ADD_PRIO(0, ETHTOOL_MIN_LEVEL, 0,
+ FS_CHAINING_CAPS,
+ ADD_NS(ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
+ ETHTOOL_PRIO_NUM_LEVELS))),
+ ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
+ ADD_NS(ADD_MULTIPLE_PRIO(1, 1),
+ ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
+ KERNEL_NIC_PRIO_NUM_LEVELS))),
ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
- FS_REQUIRED_CAPS(FS_CAP(flow_table_properties_nic_receive.flow_modify_en),
- FS_CAP(flow_table_properties_nic_receive.modify_root),
- FS_CAP(flow_table_properties_nic_receive.identified_miss_table_mode),
- FS_CAP(flow_table_properties_nic_receive.flow_table_modify)),
- ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_MAX_FT))),
+ FS_CHAINING_CAPS,
+ ADD_NS(ADD_MULTIPLE_PRIO(LEFTOVERS_NUM_PRIOS, LEFTOVERS_NUM_LEVELS))),
ADD_PRIO(0, ANCHOR_MIN_LEVEL, 0, {},
- ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_MAX_FT))),
+ ADD_NS(ADD_MULTIPLE_PRIO(ANCHOR_NUM_PRIOS, ANCHOR_NUM_LEVELS))),
}
};
@@ -222,19 +241,6 @@ static struct fs_prio *find_prio(struct mlx5_flow_namespace *ns,
return NULL;
}
-static unsigned int find_next_free_level(struct fs_prio *prio)
-{
- if (!list_empty(&prio->node.children)) {
- struct mlx5_flow_table *ft;
-
- ft = list_last_entry(&prio->node.children,
- struct mlx5_flow_table,
- node.list);
- return ft->level + 1;
- }
- return prio->start_level;
-}
-
static bool masked_memcmp(void *mask, void *val1, void *val2, size_t size)
{
unsigned int i;
@@ -351,6 +357,7 @@ static void del_rule(struct fs_node *node)
struct mlx5_flow_group *fg;
struct fs_fte *fte;
u32 *match_value;
+ int modify_mask;
struct mlx5_core_dev *dev = get_dev(node);
int match_len = MLX5_ST_SZ_BYTES(fte_match_param);
int err;
@@ -374,8 +381,11 @@ static void del_rule(struct fs_node *node)
}
if ((fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
--fte->dests_size) {
+ modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
err = mlx5_cmd_update_fte(dev, ft,
- fg->id, fte);
+ fg->id,
+ modify_mask,
+ fte);
if (err)
pr_warn("%s can't del rule fg id=%d fte_index=%d\n",
__func__, fg->id, fte->index);
@@ -464,7 +474,7 @@ static struct mlx5_flow_group *alloc_flow_group(u32 *create_fg_in)
return fg;
}
-static struct mlx5_flow_table *alloc_flow_table(int level, int max_fte,
+static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_fte,
enum fs_flow_table_type table_type)
{
struct mlx5_flow_table *ft;
@@ -476,6 +486,7 @@ static struct mlx5_flow_table *alloc_flow_table(int level, int max_fte,
ft->level = level;
ft->node.type = FS_TYPE_FLOW_TABLE;
ft->type = table_type;
+ ft->vport = vport;
ft->max_fte = max_fte;
INIT_LIST_HEAD(&ft->fwd_rules);
mutex_init(&ft->lock);
@@ -615,12 +626,13 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
return err;
}
-static int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
- struct mlx5_flow_destination *dest)
+int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
+ struct mlx5_flow_destination *dest)
{
struct mlx5_flow_table *ft;
struct mlx5_flow_group *fg;
struct fs_fte *fte;
+ int modify_mask = BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
int err = 0;
fs_get_obj(fte, rule->node.parent);
@@ -632,7 +644,9 @@ static int mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
memcpy(&rule->dest_attr, dest, sizeof(*dest));
err = mlx5_cmd_update_fte(get_dev(&ft->node),
- ft, fg->id, fte);
+ ft, fg->id,
+ modify_mask,
+ fte);
unlock_ref_node(&fte->node);
return err;
@@ -693,9 +707,23 @@ static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table
return err;
}
-struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
- int prio,
- int max_fte)
+static void list_add_flow_table(struct mlx5_flow_table *ft,
+ struct fs_prio *prio)
+{
+ struct list_head *prev = &prio->node.children;
+ struct mlx5_flow_table *iter;
+
+ fs_for_each_ft(iter, prio) {
+ if (iter->level > ft->level)
+ break;
+ prev = &iter->node.list;
+ }
+ list_add(&ft->node.list, prev);
+}
+
+static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
+ u16 vport, int prio,
+ int max_fte, u32 level)
{
struct mlx5_flow_table *next_ft = NULL;
struct mlx5_flow_table *ft;
@@ -716,12 +744,16 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
err = -EINVAL;
goto unlock_root;
}
- if (fs_prio->num_ft == fs_prio->max_ft) {
+ if (level >= fs_prio->num_levels) {
err = -ENOSPC;
goto unlock_root;
}
-
- ft = alloc_flow_table(find_next_free_level(fs_prio),
+ /* The requested level is relative to the start of the
+ * priority's level range.
+ */
+ level += fs_prio->start_level;
+ ft = alloc_flow_table(level,
+ vport,
roundup_pow_of_two(max_fte),
root->table_type);
if (!ft) {
@@ -732,7 +764,7 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
tree_init_node(&ft->node, 1, del_flow_table);
log_table_sz = ilog2(ft->max_fte);
next_ft = find_next_chained_ft(fs_prio);
- err = mlx5_cmd_create_flow_table(root->dev, ft->type, ft->level,
+ err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->type, ft->level,
log_table_sz, next_ft, &ft->id);
if (err)
goto free_ft;
@@ -742,7 +774,7 @@ struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
goto destroy_ft;
lock_ref_node(&fs_prio->node);
tree_add_node(&ft->node, &fs_prio->node);
- list_add_tail(&ft->node.list, &fs_prio->node.children);
+ list_add_flow_table(ft, fs_prio);
fs_prio->num_ft++;
unlock_ref_node(&fs_prio->node);
mutex_unlock(&root->chain_lock);
@@ -756,17 +788,32 @@ unlock_root:
return ERR_PTR(err);
}
+struct mlx5_flow_table *mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
+ int prio, int max_fte,
+ u32 level)
+{
+ return __mlx5_create_flow_table(ns, 0, prio, max_fte, level);
+}
+
+struct mlx5_flow_table *mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
+ int prio, int max_fte,
+ u32 level, u16 vport)
+{
+ return __mlx5_create_flow_table(ns, vport, prio, max_fte, level);
+}
+
struct mlx5_flow_table *mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
int prio,
int num_flow_table_entries,
- int max_num_groups)
+ int max_num_groups,
+ u32 level)
{
struct mlx5_flow_table *ft;
if (max_num_groups > num_flow_table_entries)
return ERR_PTR(-EINVAL);
- ft = mlx5_create_flow_table(ns, prio, num_flow_table_entries);
+ ft = mlx5_create_flow_table(ns, prio, num_flow_table_entries, level);
if (IS_ERR(ft))
return ft;
@@ -850,6 +897,7 @@ static struct mlx5_flow_rule *add_rule_fte(struct fs_fte *fte,
{
struct mlx5_flow_table *ft;
struct mlx5_flow_rule *rule;
+ int modify_mask = 0;
int err;
rule = alloc_rule(dest);
@@ -865,14 +913,20 @@ static struct mlx5_flow_rule *add_rule_fte(struct fs_fte *fte,
list_add(&rule->node.list, &fte->node.children);
else
list_add_tail(&rule->node.list, &fte->node.children);
- if (dest)
+ if (dest) {
fte->dests_size++;
+
+ modify_mask |= dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER ?
+ BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_FLOW_COUNTERS) :
+ BIT(MLX5_SET_FTE_MODIFY_ENABLE_MASK_DESTINATION_LIST);
+ }
+
if (fte->dests_size == 1 || !dest)
err = mlx5_cmd_create_fte(get_dev(&ft->node),
ft, fg->id, fte);
else
err = mlx5_cmd_update_fte(get_dev(&ft->node),
- ft, fg->id, fte);
+ ft, fg->id, modify_mask, fte);
if (err)
goto free_rule;
@@ -1065,11 +1119,53 @@ unlock_fg:
return rule;
}
+struct mlx5_fc *mlx5_flow_rule_counter(struct mlx5_flow_rule *rule)
+{
+ struct mlx5_flow_rule *dst;
+ struct fs_fte *fte;
+
+ fs_get_obj(fte, rule->node.parent);
+
+ fs_for_each_dst(dst, fte) {
+ if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER)
+ return dst->dest_attr.counter;
+ }
+
+ return NULL;
+}
+
+static bool counter_is_valid(struct mlx5_fc *counter, u32 action)
+{
+ if (!(action & MLX5_FLOW_CONTEXT_ACTION_COUNT))
+ return !counter;
+
+ if (!counter)
+ return false;
+
+ /* Hardware supports counters only together with a drop action */
+ return action == (MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT);
+}
+
+static bool dest_is_valid(struct mlx5_flow_destination *dest,
+ u32 action,
+ struct mlx5_flow_table *ft)
+{
+ if (dest && (dest->type == MLX5_FLOW_DESTINATION_TYPE_COUNTER))
+ return counter_is_valid(dest->counter, action);
+
+ if (!(action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
+ return true;
+
+ if (!dest || ((dest->type ==
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) &&
+ (dest->ft->level <= ft->level)))
+ return false;
+ return true;
+}
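/* A sketch of the one counter combination counter_is_valid() accepts:
 * a rule that both drops and counts (illustrative caller, not part of
 * this patch; destroying the counter on rule failure is omitted).
 */
static struct mlx5_flow_rule *
my_add_drop_count_rule(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft,
		       struct mlx5_flow_spec *spec)
{
	struct mlx5_flow_destination dest;
	struct mlx5_fc *counter;

	counter = mlx5_fc_create(dev, true);	/* aging: polled by the fc_stats work */
	if (IS_ERR(counter))
		return ERR_CAST(counter);

	dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
	dest.counter = counter;

	return mlx5_add_flow_rule(ft, spec,
				  MLX5_FLOW_CONTEXT_ACTION_DROP |
				  MLX5_FLOW_CONTEXT_ACTION_COUNT,
				  0, &dest);
}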
+
static struct mlx5_flow_rule *
_mlx5_add_flow_rule(struct mlx5_flow_table *ft,
- u8 match_criteria_enable,
- u32 *match_criteria,
- u32 *match_value,
+ struct mlx5_flow_spec *spec,
u32 action,
u32 flow_tag,
struct mlx5_flow_destination *dest)
@@ -1077,28 +1173,29 @@ _mlx5_add_flow_rule(struct mlx5_flow_table *ft,
struct mlx5_flow_group *g;
struct mlx5_flow_rule *rule;
- if ((action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) && !dest)
+ if (!dest_is_valid(dest, action, ft))
return ERR_PTR(-EINVAL);
nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT);
fs_for_each_fg(g, ft)
if (compare_match_criteria(g->mask.match_criteria_enable,
- match_criteria_enable,
+ spec->match_criteria_enable,
g->mask.match_criteria,
- match_criteria)) {
- rule = add_rule_fg(g, match_value,
+ spec->match_criteria)) {
+ rule = add_rule_fg(g, spec->match_value,
action, flow_tag, dest);
if (!IS_ERR(rule) || PTR_ERR(rule) != -ENOSPC)
goto unlock;
}
- g = create_autogroup(ft, match_criteria_enable, match_criteria);
+ g = create_autogroup(ft, spec->match_criteria_enable,
+ spec->match_criteria);
if (IS_ERR(g)) {
rule = (void *)g;
goto unlock;
}
- rule = add_rule_fg(g, match_value,
+ rule = add_rule_fg(g, spec->match_value,
action, flow_tag, dest);
if (IS_ERR(rule)) {
/* Remove assumes refcount > 0 and autogroup creates a group
@@ -1122,9 +1219,7 @@ static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
struct mlx5_flow_rule *
mlx5_add_flow_rule(struct mlx5_flow_table *ft,
- u8 match_criteria_enable,
- u32 *match_criteria,
- u32 *match_value,
+ struct mlx5_flow_spec *spec,
u32 action,
u32 flow_tag,
struct mlx5_flow_destination *dest)
@@ -1155,8 +1250,7 @@ mlx5_add_flow_rule(struct mlx5_flow_table *ft,
}
}
- rule = _mlx5_add_flow_rule(ft, match_criteria_enable, match_criteria,
- match_value, action, flow_tag, dest);
+ rule = _mlx5_add_flow_rule(ft, spec, action, flow_tag, dest);
if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
if (!IS_ERR_OR_NULL(rule) &&
@@ -1207,8 +1301,8 @@ static int update_root_ft_destroy(struct mlx5_flow_table *ft)
ft->id);
return err;
}
- root->root_ft = new_root_ft;
}
+ root->root_ft = new_root_ft;
return 0;
}
@@ -1274,30 +1368,47 @@ void mlx5_destroy_flow_group(struct mlx5_flow_group *fg)
struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
enum mlx5_flow_namespace_type type)
{
- struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns;
+ struct mlx5_flow_steering *steering = dev->priv.steering;
+ struct mlx5_flow_root_namespace *root_ns;
int prio;
struct fs_prio *fs_prio;
struct mlx5_flow_namespace *ns;
- if (!root_ns)
+ if (!steering)
return NULL;
switch (type) {
case MLX5_FLOW_NAMESPACE_BYPASS:
+ case MLX5_FLOW_NAMESPACE_OFFLOADS:
+ case MLX5_FLOW_NAMESPACE_ETHTOOL:
case MLX5_FLOW_NAMESPACE_KERNEL:
case MLX5_FLOW_NAMESPACE_LEFTOVERS:
case MLX5_FLOW_NAMESPACE_ANCHOR:
prio = type;
break;
case MLX5_FLOW_NAMESPACE_FDB:
- if (dev->priv.fdb_root_ns)
- return &dev->priv.fdb_root_ns->ns;
+ if (steering->fdb_root_ns)
+ return &steering->fdb_root_ns->ns;
+ else
+ return NULL;
+ case MLX5_FLOW_NAMESPACE_ESW_EGRESS:
+ if (steering->esw_egress_root_ns)
+ return &steering->esw_egress_root_ns->ns;
+ else
+ return NULL;
+ case MLX5_FLOW_NAMESPACE_ESW_INGRESS:
+ if (steering->esw_ingress_root_ns)
+ return &steering->esw_ingress_root_ns->ns;
else
return NULL;
default:
return NULL;
}
+ root_ns = steering->root_ns;
+ if (!root_ns)
+ return NULL;
+
fs_prio = find_prio(&root_ns->ns, prio);
if (!fs_prio)
return NULL;
@@ -1311,7 +1422,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
EXPORT_SYMBOL(mlx5_get_flow_namespace);
static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
- unsigned prio, int max_ft)
+ unsigned int prio, int num_levels)
{
struct fs_prio *fs_prio;
@@ -1322,7 +1433,7 @@ static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
fs_prio->node.type = FS_TYPE_PRIO;
tree_init_node(&fs_prio->node, 1, NULL);
tree_add_node(&fs_prio->node, &ns->node);
- fs_prio->max_ft = max_ft;
+ fs_prio->num_levels = num_levels;
fs_prio->prio = prio;
list_add_tail(&fs_prio->node.list, &ns->node.children);
@@ -1353,14 +1464,14 @@ static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
return ns;
}
-static int create_leaf_prios(struct mlx5_flow_namespace *ns, struct init_tree_node
- *prio_metadata)
+static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
+ struct init_tree_node *prio_metadata)
{
struct fs_prio *fs_prio;
int i;
for (i = 0; i < prio_metadata->num_leaf_prios; i++) {
- fs_prio = fs_create_prio(ns, i, prio_metadata->max_ft);
+ fs_prio = fs_create_prio(ns, prio++, prio_metadata->num_levels);
if (IS_ERR(fs_prio))
return PTR_ERR(fs_prio);
}
@@ -1383,13 +1494,13 @@ static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
return true;
}
-static int init_root_tree_recursive(struct mlx5_core_dev *dev,
+static int init_root_tree_recursive(struct mlx5_flow_steering *steering,
struct init_tree_node *init_node,
struct fs_node *fs_parent_node,
struct init_tree_node *init_parent_node,
- int index)
+ int prio)
{
- int max_ft_level = MLX5_CAP_FLOWTABLE(dev,
+ int max_ft_level = MLX5_CAP_FLOWTABLE(steering->dev,
flow_table_properties_nic_receive.
max_ft_level);
struct mlx5_flow_namespace *fs_ns;
@@ -1400,13 +1511,13 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev,
if (init_node->type == FS_TYPE_PRIO) {
if ((init_node->min_ft_level > max_ft_level) ||
- !has_required_caps(dev, &init_node->caps))
+ !has_required_caps(steering->dev, &init_node->caps))
return 0;
fs_get_obj(fs_ns, fs_parent_node);
if (init_node->num_leaf_prios)
- return create_leaf_prios(fs_ns, init_node);
- fs_prio = fs_create_prio(fs_ns, index, init_node->max_ft);
+ return create_leaf_prios(fs_ns, prio, init_node);
+ fs_prio = fs_create_prio(fs_ns, prio, init_node->num_levels);
if (IS_ERR(fs_prio))
return PTR_ERR(fs_prio);
base = &fs_prio->node;
@@ -1419,17 +1530,22 @@ static int init_root_tree_recursive(struct mlx5_core_dev *dev,
} else {
return -EINVAL;
}
+ prio = 0;
for (i = 0; i < init_node->ar_size; i++) {
- err = init_root_tree_recursive(dev, &init_node->children[i],
- base, init_node, i);
+ err = init_root_tree_recursive(steering, &init_node->children[i],
+ base, init_node, prio);
if (err)
return err;
+ if (init_node->children[i].type == FS_TYPE_PRIO &&
+ init_node->children[i].num_leaf_prios) {
+ prio += init_node->children[i].num_leaf_prios;
+ }
}
return 0;
}
-static int init_root_tree(struct mlx5_core_dev *dev,
+static int init_root_tree(struct mlx5_flow_steering *steering,
struct init_tree_node *init_node,
struct fs_node *fs_parent_node)
{
@@ -1439,7 +1555,7 @@ static int init_root_tree(struct mlx5_core_dev *dev,
fs_get_obj(fs_ns, fs_parent_node);
for (i = 0; i < init_node->ar_size; i++) {
- err = init_root_tree_recursive(dev, &init_node->children[i],
+ err = init_root_tree_recursive(steering, &init_node->children[i],
&fs_ns->node,
init_node, i);
if (err)
@@ -1448,7 +1564,7 @@ static int init_root_tree(struct mlx5_core_dev *dev,
return 0;
}
-static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_core_dev *dev,
+static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_flow_steering *steering,
enum fs_flow_table_type
table_type)
{
@@ -1460,7 +1576,7 @@ static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_core_dev *dev
if (!root_ns)
return NULL;
- root_ns->dev = dev;
+ root_ns->dev = steering->dev;
root_ns->table_type = table_type;
ns = &root_ns->ns;
@@ -1479,9 +1595,9 @@ static int set_prio_attrs_in_ns(struct mlx5_flow_namespace *ns, int acc_level)
struct fs_prio *prio;
fs_for_each_prio(prio, ns) {
- /* This updates prio start_level and max_ft */
+ /* This updates prio start_level and num_levels */
set_prio_attrs_in_prio(prio, acc_level);
- acc_level += prio->max_ft;
+ acc_level += prio->num_levels;
}
return acc_level;
}
@@ -1493,11 +1609,11 @@ static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
prio->start_level = acc_level;
fs_for_each_ns(ns, prio)
- /* This updates start_level and max_ft of ns's priority descendants */
+ /* This updates start_level and num_levels of ns's priority descendants */
acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);
- if (!prio->max_ft)
- prio->max_ft = acc_level_ns - prio->start_level;
- WARN_ON(prio->max_ft < acc_level_ns - prio->start_level);
+ if (!prio->num_levels)
+ prio->num_levels = acc_level_ns - prio->start_level;
+ WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
}
static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
@@ -1508,200 +1624,184 @@ static void set_prio_attrs(struct mlx5_flow_root_namespace *root_ns)
fs_for_each_prio(prio, ns) {
set_prio_attrs_in_prio(prio, start_level);
- start_level += prio->max_ft;
+ start_level += prio->num_levels;
}
}
#define ANCHOR_PRIO 0
#define ANCHOR_SIZE 1
-static int create_anchor_flow_table(struct mlx5_core_dev
- *dev)
+#define ANCHOR_LEVEL 0
+static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
{
struct mlx5_flow_namespace *ns = NULL;
struct mlx5_flow_table *ft;
- ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ANCHOR);
+ ns = mlx5_get_flow_namespace(steering->dev, MLX5_FLOW_NAMESPACE_ANCHOR);
if (!ns)
return -EINVAL;
- ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE);
+ ft = mlx5_create_flow_table(ns, ANCHOR_PRIO, ANCHOR_SIZE, ANCHOR_LEVEL);
if (IS_ERR(ft)) {
- mlx5_core_err(dev, "Failed to create last anchor flow table");
+ mlx5_core_err(steering->dev, "Failed to create last anchor flow table");
return PTR_ERR(ft);
}
return 0;
}
-static int init_root_ns(struct mlx5_core_dev *dev)
+static int init_root_ns(struct mlx5_flow_steering *steering)
{
- dev->priv.root_ns = create_root_ns(dev, FS_FT_NIC_RX);
- if (IS_ERR_OR_NULL(dev->priv.root_ns))
+ steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
+ if (IS_ERR_OR_NULL(steering->root_ns))
goto cleanup;
- if (init_root_tree(dev, &root_fs, &dev->priv.root_ns->ns.node))
+ if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node))
goto cleanup;
- set_prio_attrs(dev->priv.root_ns);
+ set_prio_attrs(steering->root_ns);
- if (create_anchor_flow_table(dev))
+ if (create_anchor_flow_table(steering))
goto cleanup;
return 0;
cleanup:
- mlx5_cleanup_fs(dev);
+ mlx5_cleanup_fs(steering->dev);
return -ENOMEM;
}
-static void cleanup_single_prio_root_ns(struct mlx5_core_dev *dev,
- struct mlx5_flow_root_namespace *root_ns)
+static void clean_tree(struct fs_node *node)
{
- struct fs_node *prio;
-
- if (!root_ns)
- return;
+ if (node) {
+ struct fs_node *iter;
+ struct fs_node *temp;
- if (!list_empty(&root_ns->ns.node.children)) {
- prio = list_first_entry(&root_ns->ns.node.children,
- struct fs_node,
- list);
- if (tree_remove_node(prio))
- mlx5_core_warn(dev,
- "Flow steering priority wasn't destroyed, refcount > 1\n");
+ list_for_each_entry_safe(iter, temp, &node->children, list)
+ clean_tree(iter);
+ tree_remove_node(node);
}
- if (tree_remove_node(&root_ns->ns.node))
- mlx5_core_warn(dev,
- "Flow steering namespace wasn't destroyed, refcount > 1\n");
- root_ns = NULL;
}
-static void destroy_flow_tables(struct fs_prio *prio)
+static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns)
{
- struct mlx5_flow_table *iter;
- struct mlx5_flow_table *tmp;
+ if (!root_ns)
+ return;
- fs_for_each_ft_safe(iter, tmp, prio)
- mlx5_destroy_flow_table(iter);
+ clean_tree(&root_ns->ns.node);
}
-static void cleanup_root_ns(struct mlx5_core_dev *dev)
+void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
{
- struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns;
- struct fs_prio *iter_prio;
+ struct mlx5_flow_steering *steering = dev->priv.steering;
- if (!MLX5_CAP_GEN(dev, nic_flow_table))
+ if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
return;
- if (!root_ns)
- return;
+ cleanup_root_ns(steering->root_ns);
+ cleanup_root_ns(steering->esw_egress_root_ns);
+ cleanup_root_ns(steering->esw_ingress_root_ns);
+ cleanup_root_ns(steering->fdb_root_ns);
+ mlx5_cleanup_fc_stats(dev);
+ kfree(steering);
+}
- /* stage 1 */
- fs_for_each_prio(iter_prio, &root_ns->ns) {
- struct fs_node *node;
- struct mlx5_flow_namespace *iter_ns;
-
- fs_for_each_ns_or_ft(node, iter_prio) {
- if (node->type == FS_TYPE_FLOW_TABLE)
- continue;
- fs_get_obj(iter_ns, node);
- while (!list_empty(&iter_ns->node.children)) {
- struct fs_prio *obj_iter_prio2;
- struct fs_node *iter_prio2 =
- list_first_entry(&iter_ns->node.children,
- struct fs_node,
- list);
-
- fs_get_obj(obj_iter_prio2, iter_prio2);
- destroy_flow_tables(obj_iter_prio2);
- if (tree_remove_node(iter_prio2)) {
- mlx5_core_warn(dev,
- "Priority %d wasn't destroyed, refcount > 1\n",
- obj_iter_prio2->prio);
- return;
- }
- }
- }
- }
+static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
+{
+ struct fs_prio *prio;
- /* stage 2 */
- fs_for_each_prio(iter_prio, &root_ns->ns) {
- while (!list_empty(&iter_prio->node.children)) {
- struct fs_node *iter_ns =
- list_first_entry(&iter_prio->node.children,
- struct fs_node,
- list);
- if (tree_remove_node(iter_ns)) {
- mlx5_core_warn(dev,
- "Namespace wasn't destroyed, refcount > 1\n");
- return;
- }
- }
- }
+ steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
+ if (!steering->fdb_root_ns)
+ return -ENOMEM;
- /* stage 3 */
- while (!list_empty(&root_ns->ns.node.children)) {
- struct fs_prio *obj_prio_node;
- struct fs_node *prio_node =
- list_first_entry(&root_ns->ns.node.children,
- struct fs_node,
- list);
-
- fs_get_obj(obj_prio_node, prio_node);
- if (tree_remove_node(prio_node)) {
- mlx5_core_warn(dev,
- "Priority %d wasn't destroyed, refcount > 1\n",
- obj_prio_node->prio);
- return;
- }
- }
+ prio = fs_create_prio(&steering->fdb_root_ns->ns, 0, 1);
+ if (IS_ERR(prio))
+ goto out_err;
- if (tree_remove_node(&root_ns->ns.node)) {
- mlx5_core_warn(dev,
- "root namespace wasn't destroyed, refcount > 1\n");
- return;
- }
+ prio = fs_create_prio(&steering->fdb_root_ns->ns, 1, 1);
+ if (IS_ERR(prio))
+ goto out_err;
- dev->priv.root_ns = NULL;
+ set_prio_attrs(steering->fdb_root_ns);
+ return 0;
+
+out_err:
+ cleanup_root_ns(steering->fdb_root_ns);
+ steering->fdb_root_ns = NULL;
+ return PTR_ERR(prio);
}
-void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
+static int init_ingress_acl_root_ns(struct mlx5_flow_steering *steering)
{
- cleanup_root_ns(dev);
- cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns);
+ struct fs_prio *prio;
+
+ steering->esw_ingress_root_ns = create_root_ns(steering, FS_FT_ESW_INGRESS_ACL);
+ if (!steering->esw_ingress_root_ns)
+ return -ENOMEM;
+
+ /* create 1 prio */
+ prio = fs_create_prio(&steering->esw_ingress_root_ns->ns, 0,
+ MLX5_TOTAL_VPORTS(steering->dev));
+ return PTR_ERR_OR_ZERO(prio);
}
-static int init_fdb_root_ns(struct mlx5_core_dev *dev)
+static int init_egress_acl_root_ns(struct mlx5_flow_steering *steering)
{
struct fs_prio *prio;
- dev->priv.fdb_root_ns = create_root_ns(dev, FS_FT_FDB);
- if (!dev->priv.fdb_root_ns)
+ steering->esw_egress_root_ns = create_root_ns(steering, FS_FT_ESW_EGRESS_ACL);
+ if (!steering->esw_egress_root_ns)
return -ENOMEM;
- /* Create single prio */
- prio = fs_create_prio(&dev->priv.fdb_root_ns->ns, 0, 1);
- if (IS_ERR(prio)) {
- cleanup_single_prio_root_ns(dev, dev->priv.fdb_root_ns);
- return PTR_ERR(prio);
- } else {
- return 0;
- }
+ /* create 1 prio */
+ prio = fs_create_prio(&steering->esw_egress_root_ns->ns, 0,
+ MLX5_TOTAL_VPORTS(steering->dev));
+ return PTR_ERR_OR_ZERO(prio);
}
int mlx5_init_fs(struct mlx5_core_dev *dev)
{
+ struct mlx5_flow_steering *steering;
int err = 0;
- if (MLX5_CAP_GEN(dev, nic_flow_table)) {
- err = init_root_ns(dev);
+ if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return 0;
+
+ err = mlx5_init_fc_stats(dev);
+ if (err)
+ return err;
+
+ steering = kzalloc(sizeof(*steering), GFP_KERNEL);
+ if (!steering)
+ return -ENOMEM;
+ steering->dev = dev;
+ dev->priv.steering = steering;
+
+ if (MLX5_CAP_GEN(dev, nic_flow_table) &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(dev, ft_support)) {
+ err = init_root_ns(steering);
if (err)
- return err;
+ goto err;
}
+
if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
- err = init_fdb_root_ns(dev);
- if (err)
- cleanup_root_ns(dev);
+ if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
+ err = init_fdb_root_ns(steering);
+ if (err)
+ goto err;
+ }
+ if (MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support)) {
+ err = init_egress_acl_root_ns(steering);
+ if (err)
+ goto err;
+ }
+ if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {
+ err = init_ingress_acl_root_ns(steering);
+ if (err)
+ goto err;
+ }
}
+ return 0;
+err:
+ mlx5_cleanup_fs(dev);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index f37a6248a27b..9cffb6aeb4e9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -45,14 +45,24 @@ enum fs_node_type {
};
enum fs_flow_table_type {
- FS_FT_NIC_RX = 0x0,
- FS_FT_FDB = 0X4,
+ FS_FT_NIC_RX = 0x0,
+ FS_FT_ESW_EGRESS_ACL = 0x2,
+ FS_FT_ESW_INGRESS_ACL = 0x3,
+ FS_FT_FDB = 0X4,
};
enum fs_fte_status {
FS_FTE_STATUS_EXISTING = 1UL << 0,
};
+struct mlx5_flow_steering {
+ struct mlx5_core_dev *dev;
+ struct mlx5_flow_root_namespace *root_ns;
+ struct mlx5_flow_root_namespace *fdb_root_ns;
+ struct mlx5_flow_root_namespace *esw_egress_root_ns;
+ struct mlx5_flow_root_namespace *esw_ingress_root_ns;
+};
+
struct fs_node {
struct list_head list;
struct list_head children;
@@ -79,6 +89,7 @@ struct mlx5_flow_rule {
struct mlx5_flow_table {
struct fs_node node;
u32 id;
+ u16 vport;
unsigned int max_fte;
unsigned int level;
enum fs_flow_table_type type;
@@ -93,6 +104,29 @@ struct mlx5_flow_table {
struct list_head fwd_rules;
};
+struct mlx5_fc_cache {
+ u64 packets;
+ u64 bytes;
+ u64 lastuse;
+};
+
+struct mlx5_fc {
+ struct rb_node node;
+ struct list_head list;
+
+ /* last{packets,bytes} members are used when calculating the delta since
+ * last reading
+ */
+ u64 lastpackets;
+ u64 lastbytes;
+
+ u16 id;
+ bool deleted;
+ bool aging;
+
+ struct mlx5_fc_cache cache ____cacheline_aligned_in_smp;
+};
+
/* Type of children is mlx5_flow_rule */
struct fs_fte {
struct fs_node node;
@@ -102,12 +136,13 @@ struct fs_fte {
u32 index;
u32 action;
enum fs_fte_status status;
+ struct mlx5_fc *counter;
};
/* Type of children is mlx5_flow_table/namespace */
struct fs_prio {
struct fs_node node;
- unsigned int max_ft;
+ unsigned int num_levels;
unsigned int start_level;
unsigned int prio;
unsigned int num_ft;
@@ -143,6 +178,9 @@ struct mlx5_flow_root_namespace {
struct mutex chain_lock;
};
+int mlx5_init_fc_stats(struct mlx5_core_dev *dev);
+void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev);
+
int mlx5_init_fs(struct mlx5_core_dev *dev);
void mlx5_cleanup_fs(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
new file mode 100644
index 000000000000..3a9195b4169d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -0,0 +1,318 @@
+/*
+ * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/fs.h>
+#include <linux/rbtree.h>
+#include "mlx5_core.h"
+#include "fs_core.h"
+#include "fs_cmd.h"
+
+#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
+
+/* locking scheme:
+ *
+ * It is the responsibility of the user to prevent concurrent calls or bad
+ * ordering to mlx5_fc_create(), mlx5_fc_destroy() and accessing a reference
+ * to struct mlx5_fc.
+ * e.g. en_tc.c is protected by the RTNL lock of its caller, and will never call a
+ * dump (access to struct mlx5_fc) after a counter is destroyed.
+ *
+ * access to counter list:
+ * - create (user context)
+ * - mlx5_fc_create() only adds to an addlist to be used by
+ * mlx5_fc_stats_work(). addlist is protected by a spinlock.
+ * - spawn the work to do the actual add
+ *
+ * - destroy (user context)
+ * - mark a counter as deleted
+ * - spawn thread to do the actual del
+ *
+ * - dump (user context)
+ * user should not call dump after destroy
+ *
+ * - query (single thread workqueue context)
+ * destroy/dump - no conflict (see destroy)
+ * query/dump - packets and bytes might be inconsistent (since update is not
+ * atomic)
+ * query/create - no conflict (see create)
+ * since every create/destroy spawns the work, the thread will actually
+ * query the hardware only after the necessary time has elapsed.
+ */
+
+static void mlx5_fc_stats_insert(struct rb_root *root, struct mlx5_fc *counter)
+{
+ struct rb_node **new = &root->rb_node;
+ struct rb_node *parent = NULL;
+
+ while (*new) {
+ struct mlx5_fc *this = container_of(*new, struct mlx5_fc, node);
+ int result = counter->id - this->id;
+
+ parent = *new;
+ if (result < 0)
+ new = &((*new)->rb_left);
+ else
+ new = &((*new)->rb_right);
+ }
+
+ /* Add new node and rebalance tree. */
+ rb_link_node(&counter->node, parent, new);
+ rb_insert_color(&counter->node, root);
+}
+
+static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
+ struct mlx5_fc *first,
+ u16 last_id)
+{
+ struct mlx5_cmd_fc_bulk *b;
+ struct rb_node *node = NULL;
+ u16 afirst_id;
+ int num;
+ int err;
+ int max_bulk = 1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk);
+
+ /* first id must be aligned to 4 when using bulk query */
+ afirst_id = first->id & ~0x3;
+
+ /* number of counters to query, including the last counter */
+ num = ALIGN(last_id - afirst_id + 1, 4);
+ if (num > max_bulk) {
+ num = max_bulk;
+ last_id = afirst_id + num - 1;
+ }
+
+ b = mlx5_cmd_fc_bulk_alloc(dev, afirst_id, num);
+ if (!b) {
+ mlx5_core_err(dev, "Error allocating resources for bulk query\n");
+ return NULL;
+ }
+
+ err = mlx5_cmd_fc_bulk_query(dev, b);
+ if (err) {
+ mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
+ goto out;
+ }
+
+ for (node = &first->node; node; node = rb_next(node)) {
+ struct mlx5_fc *counter = rb_entry(node, struct mlx5_fc, node);
+ struct mlx5_fc_cache *c = &counter->cache;
+ u64 packets;
+ u64 bytes;
+
+ if (counter->id > last_id)
+ break;
+
+ mlx5_cmd_fc_bulk_get(dev, b,
+ counter->id, &packets, &bytes);
+
+ if (c->packets == packets)
+ continue;
+
+ c->packets = packets;
+ c->bytes = bytes;
+ c->lastuse = jiffies;
+ }
+
+out:
+ mlx5_cmd_fc_bulk_free(b);
+
+ return node;
+}
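/* Worked example of the alignment math above (illustrative numbers):
 * first->id = 6 and last_id = 13 give afirst_id = 6 & ~0x3 = 4 and
 * num = ALIGN(13 - 4 + 1, 4) = 12, so ids 4..15 are fetched with one
 * command and only the tree nodes with ids 6..13 consume the reply.
 */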
+
+static void mlx5_fc_stats_work(struct work_struct *work)
+{
+ struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
+ priv.fc_stats.work.work);
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ unsigned long now = jiffies;
+ struct mlx5_fc *counter = NULL;
+ struct mlx5_fc *last = NULL;
+ struct rb_node *node;
+ LIST_HEAD(tmplist);
+
+ spin_lock(&fc_stats->addlist_lock);
+
+ list_splice_tail_init(&fc_stats->addlist, &tmplist);
+
+ if (!list_empty(&tmplist) || !RB_EMPTY_ROOT(&fc_stats->counters))
+ queue_delayed_work(fc_stats->wq, &fc_stats->work, MLX5_FC_STATS_PERIOD);
+
+ spin_unlock(&fc_stats->addlist_lock);
+
+ list_for_each_entry(counter, &tmplist, list)
+ mlx5_fc_stats_insert(&fc_stats->counters, counter);
+
+ node = rb_first(&fc_stats->counters);
+ while (node) {
+ counter = rb_entry(node, struct mlx5_fc, node);
+
+ node = rb_next(node);
+
+ if (counter->deleted) {
+ rb_erase(&counter->node, &fc_stats->counters);
+
+ mlx5_cmd_fc_free(dev, counter->id);
+
+ kfree(counter);
+ continue;
+ }
+
+ last = counter;
+ }
+
+ if (time_before(now, fc_stats->next_query) || !last)
+ return;
+
+ node = rb_first(&fc_stats->counters);
+ while (node) {
+ counter = rb_entry(node, struct mlx5_fc, node);
+
+ node = mlx5_fc_stats_query(dev, counter, last->id);
+ }
+
+ fc_stats->next_query = now + MLX5_FC_STATS_PERIOD;
+}
+
+struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
+{
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ struct mlx5_fc *counter;
+ int err;
+
+ counter = kzalloc(sizeof(*counter), GFP_KERNEL);
+ if (!counter)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx5_cmd_fc_alloc(dev, &counter->id);
+ if (err)
+ goto err_out;
+
+ if (aging) {
+ counter->aging = true;
+
+ spin_lock(&fc_stats->addlist_lock);
+ list_add(&counter->list, &fc_stats->addlist);
+ spin_unlock(&fc_stats->addlist_lock);
+
+ mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
+ }
+
+ return counter;
+
+err_out:
+ kfree(counter);
+
+ return ERR_PTR(err);
+}
+
+void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
+{
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+
+ if (!counter)
+ return;
+
+ if (counter->aging) {
+ counter->deleted = true;
+ mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
+ return;
+ }
+
+ mlx5_cmd_fc_free(dev, counter->id);
+ kfree(counter);
+}
+
+int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
+{
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+
+ fc_stats->counters = RB_ROOT;
+ INIT_LIST_HEAD(&fc_stats->addlist);
+ spin_lock_init(&fc_stats->addlist_lock);
+
+ fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
+ if (!fc_stats->wq)
+ return -ENOMEM;
+
+ INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);
+
+ return 0;
+}
+
+void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
+{
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+ struct mlx5_fc *counter;
+ struct mlx5_fc *tmp;
+ struct rb_node *node;
+
+ cancel_delayed_work_sync(&dev->priv.fc_stats.work);
+ destroy_workqueue(dev->priv.fc_stats.wq);
+ dev->priv.fc_stats.wq = NULL;
+
+ list_for_each_entry_safe(counter, tmp, &fc_stats->addlist, list) {
+ list_del(&counter->list);
+
+ mlx5_cmd_fc_free(dev, counter->id);
+
+ kfree(counter);
+ }
+
+ node = rb_first(&fc_stats->counters);
+ while (node) {
+ counter = rb_entry(node, struct mlx5_fc, node);
+
+ node = rb_next(node);
+
+ rb_erase(&counter->node, &fc_stats->counters);
+
+ mlx5_cmd_fc_free(dev, counter->id);
+
+ kfree(counter);
+ }
+}
+
+void mlx5_fc_query_cached(struct mlx5_fc *counter,
+ u64 *bytes, u64 *packets, u64 *lastuse)
+{
+ struct mlx5_fc_cache c;
+
+ c = counter->cache;
+
+ *bytes = c.bytes - counter->lastbytes;
+ *packets = c.packets - counter->lastpackets;
+ *lastuse = c.lastuse;
+
+ counter->lastbytes = c.bytes;
+ counter->lastpackets = c.packets;
+}
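The cached query is delta based: each call returns the change since the previous call and makes the current cache the new baseline. An illustrative trace (the cache values are invented):

	/* Illustrative only: two successive dumps of the same aging counter */
	static void example_dump_twice(struct mlx5_fc *counter)
	{
		u64 bytes, packets, lastuse;

		/* suppose the cache holds packets = 100, bytes = 64000 */
		mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
		/* packets == 100, bytes == 64000: delta from the initial zero */

		/* after more polls the cache holds packets = 150, bytes = 96000 */
		mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
		/* packets == 50, bytes == 32000: delta since the previous dump */
	}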
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 75c7ae6a5cc4..77fc1aa26114 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -151,6 +151,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
+ if (MLX5_CAP_GEN(dev, qos)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_QOS);
+ if (err)
+ return err;
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index f5deb642d0d6..1a05fb965c8d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -108,15 +108,21 @@ static int in_fatal(struct mlx5_core_dev *dev)
void mlx5_enter_error_state(struct mlx5_core_dev *dev)
{
+ mutex_lock(&dev->intf_state_mutex);
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
- return;
+ goto unlock;
mlx5_core_err(dev, "start\n");
- if (pci_channel_offline(dev->pdev) || in_fatal(dev))
+ if (pci_channel_offline(dev->pdev) || in_fatal(dev)) {
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+ trigger_cmd_completions(dev);
+ }
mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 0);
mlx5_core_err(dev, "end\n");
+
+unlock:
+ mutex_unlock(&dev->intf_state_mutex);
}
static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
@@ -176,11 +182,11 @@ static const char *hsynd_str(u8 synd)
case MLX5_HEALTH_SYNDR_EQ_ERR:
return "EQ error";
case MLX5_HEALTH_SYNDR_EQ_INV:
- return "Invalid EQ refrenced";
+ return "Invalid EQ referenced";
case MLX5_HEALTH_SYNDR_FFSER_ERR:
return "FFSER error";
case MLX5_HEALTH_SYNDR_HIGH_TEMP:
- return "High temprature";
+ return "High temperature";
default:
return "unrecognized error";
}
@@ -245,7 +251,6 @@ static void poll_health(unsigned long data)
u32 count;
if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
- trigger_cmd_completions(dev);
mod_timer(&health->timer, get_next_poll_jiffies());
return;
}
@@ -267,7 +272,7 @@ static void poll_health(unsigned long data)
if (in_fatal(dev) && !health->sick) {
health->sick = true;
print_health_info(dev);
- queue_work(health->wq, &health->work);
+ schedule_work(&health->work);
}
}
@@ -296,7 +301,7 @@ void mlx5_health_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
- destroy_workqueue(health->wq);
+ flush_work(&health->work);
}
int mlx5_health_init(struct mlx5_core_dev *dev)
@@ -311,10 +316,7 @@ int mlx5_health_init(struct mlx5_core_dev *dev)
strcpy(name, "mlx5_health");
strcat(name, dev_name(&dev->pdev->dev));
- health->wq = create_singlethread_workqueue(name);
kfree(name);
- if (!health->wq)
- return -ENOMEM;
INIT_WORK(&health->work, health_care);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 6892746fd10d..2385bae92672 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -48,6 +48,10 @@
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/mlx5/mlx5_ifc.h>
+#ifdef CONFIG_RFS_ACCEL
+#include <linux/cpu_rmap.h>
+#endif
+#include <net/devlink.h>
#include "mlx5_core.h"
#include "fs_core.h"
#ifdef CONFIG_MLX5_CORE_EN
@@ -660,11 +664,34 @@ int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
}
EXPORT_SYMBOL(mlx5_vector2eqn);
+struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn)
+{
+ struct mlx5_eq_table *table = &dev->priv.eq_table;
+ struct mlx5_eq *eq;
+
+ spin_lock(&table->lock);
+ list_for_each_entry(eq, &table->comp_eqs_list, list)
+ if (eq->eqn == eqn) {
+ spin_unlock(&table->lock);
+ return eq;
+ }
+
+ spin_unlock(&table->lock);
+
+ return ERR_PTR(-ENOENT);
+}
+
static void free_comp_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = &dev->priv.eq_table;
struct mlx5_eq *eq, *n;
+#ifdef CONFIG_RFS_ACCEL
+ if (dev->rmap) {
+ free_irq_cpu_rmap(dev->rmap);
+ dev->rmap = NULL;
+ }
+#endif
spin_lock(&table->lock);
list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
list_del(&eq->list);
@@ -691,6 +718,11 @@ static int alloc_comp_eqs(struct mlx5_core_dev *dev)
INIT_LIST_HEAD(&table->comp_eqs_list);
ncomp_vec = table->num_comp_vectors;
nent = MLX5_COMP_EQ_SIZE;
+#ifdef CONFIG_RFS_ACCEL
+ dev->rmap = alloc_irq_cpu_rmap(ncomp_vec);
+ if (!dev->rmap)
+ return -ENOMEM;
+#endif
for (i = 0; i < ncomp_vec; i++) {
eq = kzalloc(sizeof(*eq), GFP_KERNEL);
if (!eq) {
@@ -698,6 +730,10 @@ static int alloc_comp_eqs(struct mlx5_core_dev *dev)
goto clean;
}
+#ifdef CONFIG_RFS_ACCEL
+ irq_cpu_rmap_add(dev->rmap,
+ dev->priv.msix_arr[i + MLX5_EQ_VEC_COMP_BASE].vector);
+#endif
snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
err = mlx5_create_map_eq(dev, eq,
i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
@@ -1109,6 +1145,13 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
dev_err(&pdev->dev, "Failed to init flow steering\n");
goto err_fs;
}
+
+ err = mlx5_init_rl_table(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init rate limiting\n");
+ goto err_rl;
+ }
+
#ifdef CONFIG_MLX5_CORE_EN
err = mlx5_eswitch_init(dev);
if (err) {
@@ -1148,6 +1191,8 @@ err_sriov:
mlx5_eswitch_cleanup(dev->priv.eswitch);
#endif
err_reg_dev:
+ mlx5_cleanup_rl_table(dev);
+err_rl:
mlx5_cleanup_fs(dev);
err_fs:
mlx5_cleanup_mkey_table(dev);
@@ -1218,6 +1263,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
mlx5_eswitch_cleanup(dev->priv.eswitch);
#endif
+ mlx5_cleanup_rl_table(dev);
mlx5_cleanup_fs(dev);
mlx5_cleanup_mkey_table(dev);
mlx5_cleanup_srq_table(dev);
@@ -1270,19 +1316,28 @@ struct mlx5_core_event_handler {
void *data);
};
+static const struct devlink_ops mlx5_devlink_ops = {
+#ifdef CONFIG_MLX5_CORE_EN
+ .eswitch_mode_set = mlx5_devlink_eswitch_mode_set,
+ .eswitch_mode_get = mlx5_devlink_eswitch_mode_get,
+#endif
+};
static int init_one(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct mlx5_core_dev *dev;
+ struct devlink *devlink;
struct mlx5_priv *priv;
int err;
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
+ devlink = devlink_alloc(&mlx5_devlink_ops, sizeof(*dev));
+ if (!devlink) {
dev_err(&pdev->dev, "kzalloc failed\n");
return -ENOMEM;
}
+
+ dev = devlink_priv(devlink);
priv = &dev->priv;
priv->pci_dev_data = id->driver_data;
@@ -1319,15 +1374,21 @@ static int init_one(struct pci_dev *pdev,
goto clean_health;
}
+ err = devlink_register(devlink, &pdev->dev);
+ if (err)
+ goto clean_load;
+
return 0;
+clean_load:
+ mlx5_unload_one(dev, priv);
clean_health:
mlx5_health_cleanup(dev);
close_pci:
mlx5_pci_close(dev, priv);
clean_dev:
pci_set_drvdata(pdev, NULL);
- kfree(dev);
+ devlink_free(devlink);
return err;
}
@@ -1335,8 +1396,10 @@ clean_dev:
static void remove_one(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ struct devlink *devlink = priv_to_devlink(dev);
struct mlx5_priv *priv = &dev->priv;
+ devlink_unregister(devlink);
if (mlx5_unload_one(dev, priv)) {
dev_err(&dev->pdev->dev, "mlx5_unload_one failed\n");
mlx5_health_cleanup(dev);
@@ -1345,7 +1408,7 @@ static void remove_one(struct pci_dev *pdev)
mlx5_health_cleanup(dev);
mlx5_pci_close(dev, priv);
pci_set_drvdata(pdev, NULL);
- kfree(dev);
+ devlink_free(devlink);
}
static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
@@ -1357,15 +1420,43 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
dev_info(&pdev->dev, "%s was called\n", __func__);
mlx5_enter_error_state(dev);
mlx5_unload_one(dev, priv);
+ pci_save_state(pdev);
mlx5_pci_disable_device(dev);
return state == pci_channel_io_perm_failure ?
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
+/* wait for the device to show vital signs, i.e. for the health
+ * counter to start counting.
+ */
+static int wait_vital(struct pci_dev *pdev)
+{
+ struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ struct mlx5_core_health *health = &dev->priv.health;
+ const int niter = 100;
+ u32 last_count = 0;
+ u32 count;
+ int i;
+
+ for (i = 0; i < niter; i++) {
+ count = ioread32be(health->health_counter);
+ if (count && count != 0xffffffff) {
+ if (last_count && last_count != count) {
+ dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
+ return 0;
+ }
+ last_count = count;
+ }
+ msleep(50);
+ }
+
+ return -ETIMEDOUT;
+}
+
static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
- int err = 0;
+ int err;
dev_info(&pdev->dev, "%s was called\n", __func__);
@@ -1375,11 +1466,16 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
, __func__, err);
return PCI_ERS_RESULT_DISCONNECT;
}
+
pci_set_master(pdev);
- pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
- return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
+ if (wait_vital(pdev)) {
+ dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ return PCI_ERS_RESULT_RECOVERED;
}
void mlx5_disable_device(struct mlx5_core_dev *dev)
@@ -1387,48 +1483,6 @@ void mlx5_disable_device(struct mlx5_core_dev *dev)
mlx5_pci_err_detected(dev->pdev, 0);
}
-/* wait for the device to show vital signs. For now we check
- * that we can read the device ID and that the health buffer
- * shows a non zero value which is different than 0xffffffff
- */
-static void wait_vital(struct pci_dev *pdev)
-{
- struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
- struct mlx5_core_health *health = &dev->priv.health;
- const int niter = 100;
- u32 count;
- u16 did;
- int i;
-
- /* Wait for firmware to be ready after reset */
- msleep(1000);
- for (i = 0; i < niter; i++) {
- if (pci_read_config_word(pdev, 2, &did)) {
- dev_warn(&pdev->dev, "failed reading config word\n");
- break;
- }
- if (did == pdev->device) {
- dev_info(&pdev->dev, "device ID correctly read after %d iterations\n", i);
- break;
- }
- msleep(50);
- }
- if (i == niter)
- dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);
-
- for (i = 0; i < niter; i++) {
- count = ioread32be(health->health_counter);
- if (count && count != 0xffffffff) {
- dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
- break;
- }
- msleep(50);
- }
-
- if (i == niter)
- dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);
-}
-
static void mlx5_pci_resume(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
@@ -1437,9 +1491,6 @@ static void mlx5_pci_resume(struct pci_dev *pdev)
dev_info(&pdev->dev, "%s was called\n", __func__);
- pci_save_state(pdev);
- wait_vital(pdev);
-
err = mlx5_load_one(dev, priv);
if (err)
dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n"
@@ -1473,8 +1524,9 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4 VF */
{ PCI_VDEVICE(MELLANOX, 0x1015) }, /* ConnectX-4LX */
{ PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4LX VF */
- { PCI_VDEVICE(MELLANOX, 0x1017) }, /* ConnectX-5 */
+ { PCI_VDEVICE(MELLANOX, 0x1017) }, /* ConnectX-5, PCIe 3.0 */
{ PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5 VF */
+ { PCI_VDEVICE(MELLANOX, 0x1019) }, /* ConnectX-5, PCIe 4.0 */
{ 0, }
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 0b0b226c789e..2f86ec6fcf25 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -42,6 +42,8 @@
#define DRIVER_VERSION "3.0-1"
#define DRIVER_RELDATE "January 2015"
+#define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs(mdev->pdev))
+
extern int mlx5_core_debug_mask;
#define mlx5_core_dbg(__dev, format, ...) \
@@ -100,6 +102,8 @@ int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev);
cycle_t mlx5_read_internal_timer(struct mlx5_core_dev *dev);
u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx);
+struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn);
+void mlx5_cq_tasklet_cb(unsigned long data);
void mlx5e_init(void);
void mlx5e_cleanup(void);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 9eeee0545f1c..32dea3524cee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -345,7 +345,6 @@ retry:
func_id, npages, err);
goto out_4k;
}
- dev->priv.fw_pages += npages;
err = mlx5_cmd_status_to_err(&out.hdr);
if (err) {
@@ -373,6 +372,33 @@ out_free:
return err;
}
+static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
+ struct mlx5_manage_pages_inbox *in, int in_size,
+ struct mlx5_manage_pages_outbox *out, int out_size)
+{
+ struct fw_page *fwp;
+ struct rb_node *p;
+ u32 npages;
+ u32 i = 0;
+
+ if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
+ return mlx5_cmd_exec_check_status(dev, (u32 *)in, in_size,
+ (u32 *)out, out_size);
+
+ npages = be32_to_cpu(in->num_entries);
+
+ p = rb_first(&dev->priv.page_root);
+ while (p && i < npages) {
+ fwp = rb_entry(p, struct fw_page, rb_node);
+ out->pas[i] = cpu_to_be64(fwp->addr);
+ p = rb_next(p);
+ i++;
+ }
+
+ out->num_entries = cpu_to_be32(i);
+ return 0;
+}
+
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
int *nclaimed)
{
@@ -398,15 +424,9 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
in.func_id = cpu_to_be16(func_id);
in.num_entries = cpu_to_be32(npages);
mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
- err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
+ err = reclaim_pages_cmd(dev, &in, sizeof(in), out, outlen);
if (err) {
- mlx5_core_err(dev, "failed reclaiming pages\n");
- goto out_free;
- }
- dev->priv.fw_pages -= npages;
-
- if (out->hdr.status) {
- err = mlx5_cmd_status_to_err(&out->hdr);
+ mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
goto out_free;
}
@@ -417,13 +437,15 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
err = -EINVAL;
goto out_free;
}
- if (nclaimed)
- *nclaimed = num_claimed;
for (i = 0; i < num_claimed; i++) {
addr = be64_to_cpu(out->pas[i]);
free_4k(dev, addr);
}
+
+ if (nclaimed)
+ *nclaimed = num_claimed;
+
dev->priv.fw_pages -= num_claimed;
if (func_id)
dev->priv.vfs_pages -= num_claimed;
@@ -514,14 +536,10 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
p = rb_first(&dev->priv.page_root);
if (p) {
fwp = rb_entry(p, struct fw_page, rb_node);
- if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
- free_4k(dev, fwp->addr);
- nclaimed = 1;
- } else {
- err = reclaim_pages(dev, fwp->func_id,
- optimal_reclaimed_pages(),
- &nclaimed);
- }
+ err = reclaim_pages(dev, fwp->func_id,
+ optimal_reclaimed_pages(),
+ &nclaimed);
+
if (err) {
mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
err);
@@ -536,6 +554,13 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
}
} while (p);
+ WARN(dev->priv.fw_pages,
+ "FW pages counter is %d after reclaiming all pages\n",
+ dev->priv.fw_pages);
+ WARN(dev->priv.vfs_pages,
+ "VFs FW pages counter is %d after reclaiming all pages\n",
+ dev->priv.vfs_pages);
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 53cc1e2c693b..752c08127138 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -115,6 +115,19 @@ int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
}
EXPORT_SYMBOL_GPL(mlx5_query_port_ptys);
+int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration)
+{
+ u32 out[MLX5_ST_SZ_DW(mlcr_reg)];
+ u32 in[MLX5_ST_SZ_DW(mlcr_reg)];
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(mlcr_reg, in, local_port, 1);
+ MLX5_SET(mlcr_reg, in, beacon_duration, beacon_duration);
+
+ return mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_MLCR, 0, 1);
+}
+
int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
u32 *proto_cap, int proto_mask)
{
@@ -189,15 +202,24 @@ int mlx5_query_port_proto_oper(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL_GPL(mlx5_query_port_proto_oper);
-int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
- int proto_mask)
+int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable,
+ u32 proto_admin, int proto_mask)
{
- u32 in[MLX5_ST_SZ_DW(ptys_reg)];
u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ u32 in[MLX5_ST_SZ_DW(ptys_reg)];
+ u8 an_disable_admin;
+ u8 an_disable_cap;
+ u8 an_status;
+
+ mlx5_query_port_autoneg(dev, proto_mask, &an_status,
+ &an_disable_cap, &an_disable_admin);
+ if (!an_disable_cap && an_disable)
+ return -EPERM;
memset(in, 0, sizeof(in));
MLX5_SET(ptys_reg, in, local_port, 1);
+ MLX5_SET(ptys_reg, in, an_disable_admin, an_disable);
MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
if (proto_mask == MLX5_PTYS_EN)
MLX5_SET(ptys_reg, in, eth_proto_admin, proto_admin);
@@ -207,7 +229,19 @@ int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
return mlx5_core_access_reg(dev, in, sizeof(in), out,
sizeof(out), MLX5_REG_PTYS, 0, 1);
}
-EXPORT_SYMBOL_GPL(mlx5_set_port_proto);
+EXPORT_SYMBOL_GPL(mlx5_set_port_ptys);
+
+/* Use this function only after setting a port register */
+void mlx5_toggle_port_link(struct mlx5_core_dev *dev)
+{
+ enum mlx5_port_status ps;
+
+ mlx5_query_port_admin_status(dev, &ps);
+ mlx5_set_port_admin_status(dev, MLX5_PORT_DOWN);
+ if (ps == MLX5_PORT_UP)
+ mlx5_set_port_admin_status(dev, MLX5_PORT_UP);
+}
+EXPORT_SYMBOL_GPL(mlx5_toggle_port_link);
int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status status)
@@ -297,6 +331,82 @@ void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
}
EXPORT_SYMBOL_GPL(mlx5_query_port_oper_mtu);
+static int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num)
+{
+ u32 out[MLX5_ST_SZ_DW(pmlp_reg)];
+ u32 in[MLX5_ST_SZ_DW(pmlp_reg)];
+ int module_mapping;
+ int err;
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(pmlp_reg, in, local_port, 1);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_PMLP, 0, 0);
+ if (err)
+ return err;
+
+ module_mapping = MLX5_GET(pmlp_reg, out, lane0_module_mapping);
+ *module_num = module_mapping & MLX5_EEPROM_IDENTIFIER_BYTE_MASK;
+
+ return 0;
+}
+
+int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
+ u16 offset, u16 size, u8 *data)
+{
+ u32 out[MLX5_ST_SZ_DW(mcia_reg)];
+ u32 in[MLX5_ST_SZ_DW(mcia_reg)];
+ int module_num;
+ u16 i2c_addr;
+ int status;
+ int err;
+ void *ptr = MLX5_ADDR_OF(mcia_reg, out, dword_0);
+
+ err = mlx5_query_module_num(dev, &module_num);
+ if (err)
+ return err;
+
+ memset(in, 0, sizeof(in));
+ size = min_t(int, size, MLX5_EEPROM_MAX_BYTES);
+
+ if (offset < MLX5_EEPROM_PAGE_LENGTH &&
+ offset + size > MLX5_EEPROM_PAGE_LENGTH)
+ /* Cross-page read: clamp so the read ends at offset 256 (low page end) */
+ size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
+
+ i2c_addr = MLX5_I2C_ADDR_LOW;
+ if (offset >= MLX5_EEPROM_PAGE_LENGTH) {
+ i2c_addr = MLX5_I2C_ADDR_HIGH;
+ offset -= MLX5_EEPROM_PAGE_LENGTH;
+ }
+
+ MLX5_SET(mcia_reg, in, l, 0);
+ MLX5_SET(mcia_reg, in, module, module_num);
+ MLX5_SET(mcia_reg, in, i2c_device_address, i2c_addr);
+ MLX5_SET(mcia_reg, in, page_number, 0);
+ MLX5_SET(mcia_reg, in, device_address, offset);
+ MLX5_SET(mcia_reg, in, size, size);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_MCIA, 0, 0);
+ if (err)
+ return err;
+
+ status = MLX5_GET(mcia_reg, out, status);
+ if (status) {
+ mlx5_core_err(dev, "query_mcia_reg failed: status: 0x%x\n",
+ status);
+ return -EIO;
+ }
+
+ memcpy(data, ptr, size);
+
+ return size;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_module_eeprom);
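Since the read is clamped at the low-page boundary and the function returns the number of bytes actually read, callers are expected to loop until they have all the bytes they want. A sketch under that assumption (the wrapper function and its arguments are hypothetical):

	/* Hypothetical caller: read 'total' bytes of module EEPROM in chunks. */
	static int example_read_module_eeprom(struct mlx5_core_dev *dev,
					      u8 *buf, u16 total)
	{
		u16 offset = 0;

		while (offset < total) {
			int n = mlx5_query_module_eeprom(dev, offset,
							 total - offset,
							 buf + offset);

			if (n < 0)
				return n;	/* register access or status error */
			if (n == 0)
				return -EIO;	/* defensive: avoid a busy loop */
			offset += n;		/* may stop at the page boundary */
		}

		return 0;
	}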
+
static int mlx5_query_port_pvlc(struct mlx5_core_dev *dev, u32 *pvlc,
int pvlc_size, u8 local_port)
{
@@ -429,6 +539,25 @@ int mlx5_query_port_pfc(struct mlx5_core_dev *dev, u8 *pfc_en_tx, u8 *pfc_en_rx)
}
EXPORT_SYMBOL_GPL(mlx5_query_port_pfc);
+void mlx5_query_port_autoneg(struct mlx5_core_dev *dev, int proto_mask,
+ u8 *an_status,
+ u8 *an_disable_cap, u8 *an_disable_admin)
+{
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+
+ *an_status = 0;
+ *an_disable_cap = 0;
+ *an_disable_admin = 0;
+
+ if (mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask, 1))
+ return;
+
+ *an_status = MLX5_GET(ptys_reg, out, an_status);
+ *an_disable_cap = MLX5_GET(ptys_reg, out, an_disable_cap);
+ *an_disable_admin = MLX5_GET(ptys_reg, out, an_disable_admin);
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_autoneg);
+
int mlx5_max_tc(struct mlx5_core_dev *mdev)
{
u8 num_tc = MLX5_CAP_GEN(mdev, max_tc) ? : 8;
@@ -607,3 +736,52 @@ int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode)
return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_port_wol);
+
+static int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out,
+ int outlen)
+{
+ u32 in[MLX5_ST_SZ_DW(pcmr_reg)];
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(pcmr_reg, in, local_port, 1);
+
+ return mlx5_core_access_reg(mdev, in, sizeof(in), out,
+ outlen, MLX5_REG_PCMR, 0, 0);
+}
+
+static int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(pcmr_reg)];
+
+ return mlx5_core_access_reg(mdev, in, inlen, out,
+ sizeof(out), MLX5_REG_PCMR, 0, 1);
+}
+
+int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable)
+{
+ u32 in[MLX5_ST_SZ_DW(pcmr_reg)];
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(pcmr_reg, in, local_port, 1);
+ MLX5_SET(pcmr_reg, in, fcs_chk, enable);
+
+ return mlx5_set_ports_check(mdev, in, sizeof(in));
+}
+
+void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported,
+ bool *enabled)
+{
+ u32 out[MLX5_ST_SZ_DW(pcmr_reg)];
+
+ /* Default values for FW that does not support MLX5_REG_PCMR */
+ *supported = false;
+ *enabled = true;
+
+ if (!MLX5_CAP_GEN(mdev, ports_check))
+ return;
+
+ if (mlx5_query_ports_check(mdev, out, sizeof(out)))
+ return;
+
+ *supported = !!(MLX5_GET(pcmr_reg, out, fcs_cap));
+ *enabled = !!(MLX5_GET(pcmr_reg, out, fcs_chk));
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index def289375ecb..b82d65802d96 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -418,7 +418,7 @@ int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
if (out.hdr.status)
err = mlx5_cmd_status_to_err(&out.hdr);
else
- *xrcdn = be32_to_cpu(out.xrcdn);
+ *xrcdn = be32_to_cpu(out.xrcdn) & 0xffffff;
return err;
}
@@ -538,3 +538,71 @@ void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
mlx5_core_destroy_sq(dev, sq->qpn);
}
EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);
+
+int mlx5_core_alloc_q_counter(struct mlx5_core_dev *dev, u16 *counter_id)
+{
+ u32 in[MLX5_ST_SZ_DW(alloc_q_counter_in)];
+ u32 out[MLX5_ST_SZ_DW(alloc_q_counter_out)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(alloc_q_counter_in, in, opcode, MLX5_CMD_OP_ALLOC_Q_COUNTER);
+ err = mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+ if (!err)
+ *counter_id = MLX5_GET(alloc_q_counter_out, out,
+ counter_set_id);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_core_alloc_q_counter);
+
+int mlx5_core_dealloc_q_counter(struct mlx5_core_dev *dev, u16 counter_id)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)];
+ u32 out[MLX5_ST_SZ_DW(dealloc_q_counter_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(dealloc_q_counter_in, in, opcode,
+ MLX5_CMD_OP_DEALLOC_Q_COUNTER);
+ MLX5_SET(dealloc_q_counter_in, in, counter_set_id, counter_id);
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out,
+ sizeof(out));
+}
+EXPORT_SYMBOL_GPL(mlx5_core_dealloc_q_counter);
+
+int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id,
+ int reset, void *out, int out_size)
+{
+ u32 in[MLX5_ST_SZ_DW(query_q_counter_in)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(query_q_counter_in, in, opcode, MLX5_CMD_OP_QUERY_Q_COUNTER);
+ MLX5_SET(query_q_counter_in, in, clear, reset);
+ MLX5_SET(query_q_counter_in, in, counter_set_id, counter_id);
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, out_size);
+}
+EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter);
+
+int mlx5_core_query_out_of_buffer(struct mlx5_core_dev *dev, u16 counter_id,
+ u32 *out_of_buffer)
+{
+ int outlen = MLX5_ST_SZ_BYTES(query_q_counter_out);
+ void *out;
+ int err;
+
+ out = mlx5_vzalloc(outlen);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_core_query_q_counter(dev, counter_id, 0, out, outlen);
+ if (!err)
+ *out_of_buffer = MLX5_GET(query_q_counter_out, out,
+ out_of_buffer);
+
+ kfree(out);
+ return err;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rl.c b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
new file mode 100644
index 000000000000..c07c28bd3d55
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rl.c
@@ -0,0 +1,209 @@
+/*
+ * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/cmd.h>
+#include "mlx5_core.h"
+
+/* Find an entry where the given rate can be registered.
+ * If the rate already exists, return the entry where it is registered;
+ * otherwise return the first available entry.
+ * If the table is full, return NULL.
+ */
+static struct mlx5_rl_entry *find_rl_entry(struct mlx5_rl_table *table,
+ u32 rate)
+{
+ struct mlx5_rl_entry *ret_entry = NULL;
+ bool empty_found = false;
+ int i;
+
+ for (i = 0; i < table->max_size; i++) {
+ if (table->rl_entry[i].rate == rate)
+ return &table->rl_entry[i];
+ if (!empty_found && !table->rl_entry[i].rate) {
+ empty_found = true;
+ ret_entry = &table->rl_entry[i];
+ }
+ }
+
+ return ret_entry;
+}
+
+static int mlx5_set_rate_limit_cmd(struct mlx5_core_dev *dev,
+ u32 rate, u16 index)
+{
+ u32 in[MLX5_ST_SZ_DW(set_rate_limit_in)];
+ u32 out[MLX5_ST_SZ_DW(set_rate_limit_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+ MLX5_SET(set_rate_limit_in, in, opcode,
+ MLX5_CMD_OP_SET_RATE_LIMIT);
+ MLX5_SET(set_rate_limit_in, in, rate_limit_index, index);
+ MLX5_SET(set_rate_limit_in, in, rate_limit, rate);
+
+ return mlx5_cmd_exec_check_status(dev, in, sizeof(in),
+ out, sizeof(out));
+}
+
+bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate)
+{
+ struct mlx5_rl_table *table = &dev->priv.rl_table;
+
+ return (rate <= table->max_rate && rate >= table->min_rate);
+}
+EXPORT_SYMBOL(mlx5_rl_is_in_range);
+
+int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u32 rate, u16 *index)
+{
+ struct mlx5_rl_table *table = &dev->priv.rl_table;
+ struct mlx5_rl_entry *entry;
+ int err = 0;
+
+ mutex_lock(&table->rl_lock);
+
+ if (!rate || !mlx5_rl_is_in_range(dev, rate)) {
+ mlx5_core_err(dev, "Invalid rate: %u, should be %u to %u\n",
+ rate, table->min_rate, table->max_rate);
+ err = -EINVAL;
+ goto out;
+ }
+
+ entry = find_rl_entry(table, rate);
+ if (!entry) {
+ mlx5_core_err(dev, "Max number of %u rates reached\n",
+ table->max_size);
+ err = -ENOSPC;
+ goto out;
+ }
+ if (entry->refcount) {
+ /* rate already configured */
+ entry->refcount++;
+ } else {
+ /* new rate limit */
+ err = mlx5_set_rate_limit_cmd(dev, rate, entry->index);
+ if (err) {
+ mlx5_core_err(dev, "Failed configuring rate: %u (%d)\n",
+ rate, err);
+ goto out;
+ }
+ entry->rate = rate;
+ entry->refcount = 1;
+ }
+ *index = entry->index;
+
+out:
+ mutex_unlock(&table->rl_lock);
+ return err;
+}
+EXPORT_SYMBOL(mlx5_rl_add_rate);
+
+void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, u32 rate)
+{
+ struct mlx5_rl_table *table = &dev->priv.rl_table;
+ struct mlx5_rl_entry *entry = NULL;
+
+ /* 0 is a reserved value for unlimited rate */
+ if (rate == 0)
+ return;
+
+ mutex_lock(&table->rl_lock);
+ entry = find_rl_entry(table, rate);
+ if (!entry || !entry->refcount) {
+ mlx5_core_warn(dev, "Rate %u is not configured\n", rate);
+ goto out;
+ }
+
+ entry->refcount--;
+ if (!entry->refcount) {
+ /* need to remove rate */
+ mlx5_set_rate_limit_cmd(dev, 0, entry->index);
+ entry->rate = 0;
+ }
+
+out:
+ mutex_unlock(&table->rl_lock);
+}
+EXPORT_SYMBOL(mlx5_rl_remove_rate);
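Entries are reference counted: registering an already-configured rate only bumps the refcount and returns the same hardware index, and every mlx5_rl_add_rate() must be paired with one mlx5_rl_remove_rate(). A sketch of that pairing (the wrapper function is hypothetical; rate units follow the packet_pacing caps):

	/* Hypothetical: share one HW rate entry between two send queues. */
	static int example_pair_rate(struct mlx5_core_dev *dev, u32 rate)
	{
		u16 index;
		int err;

		err = mlx5_rl_add_rate(dev, rate, &index);	/* refcount 0 -> 1, programs HW */
		if (err)
			return err;

		err = mlx5_rl_add_rate(dev, rate, &index);	/* refcount 1 -> 2, same index */
		if (err) {
			mlx5_rl_remove_rate(dev, rate);
			return err;
		}

		/* ... use 'index' in two send queue contexts ... */

		mlx5_rl_remove_rate(dev, rate);	/* refcount 2 -> 1 */
		mlx5_rl_remove_rate(dev, rate);	/* refcount 1 -> 0, HW entry cleared */
		return 0;
	}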
+
+int mlx5_init_rl_table(struct mlx5_core_dev *dev)
+{
+ struct mlx5_rl_table *table = &dev->priv.rl_table;
+ int i;
+
+ mutex_init(&table->rl_lock);
+ if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, packet_pacing)) {
+ table->max_size = 0;
+ return 0;
+ }
+
+ /* First entry is reserved for unlimited rate */
+ table->max_size = MLX5_CAP_QOS(dev, packet_pacing_rate_table_size) - 1;
+ table->max_rate = MLX5_CAP_QOS(dev, packet_pacing_max_rate);
+ table->min_rate = MLX5_CAP_QOS(dev, packet_pacing_min_rate);
+
+ table->rl_entry = kcalloc(table->max_size, sizeof(struct mlx5_rl_entry),
+ GFP_KERNEL);
+ if (!table->rl_entry)
+ return -ENOMEM;
+
+ /* The index is the entry's index in the HW rate limit table;
+ * index 0 is reserved for the unlimited rate
+ */
+ for (i = 0; i < table->max_size; i++)
+ table->rl_entry[i].index = i + 1;
+
+ /* index 0 is reserved, hence max_size = table size - 1 */
+ mlx5_core_info(dev, "Rate limit: %u rates are supported, range: %uMbps to %uMbps\n",
+ table->max_size,
+ table->min_rate >> 10,
+ table->max_rate >> 10);
+
+ return 0;
+}
+
+void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev)
+{
+ struct mlx5_rl_table *table = &dev->priv.rl_table;
+ int i;
+
+ /* Clear all configured rates */
+ for (i = 0; i < table->max_size; i++)
+ if (table->rl_entry[i].rate)
+ mlx5_set_rate_limit_cmd(dev, 0,
+ table->rl_entry[i].index);
+
+ kfree(dev->priv.rl_table.rl_entry);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 7b24386794f9..b380a6bc1f85 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -140,7 +140,7 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
int err;
- mlx5_core_dbg(dev, "requsted num_vfs %d\n", num_vfs);
+ mlx5_core_dbg(dev, "requested num_vfs %d\n", num_vfs);
if (!mlx5_core_is_pf(dev))
return -EPERM;
@@ -167,7 +167,7 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
mlx5_core_init_vfs(dev, num_vfs);
#ifdef CONFIG_MLX5_CORE_EN
- mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs);
+ mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
#endif
return num_vfs;
@@ -209,7 +209,8 @@ int mlx5_sriov_init(struct mlx5_core_dev *dev)
mlx5_core_init_vfs(dev, cur_vfs);
#ifdef CONFIG_MLX5_CORE_EN
if (cur_vfs)
- mlx5_eswitch_enable_sriov(dev->priv.eswitch, cur_vfs);
+ mlx5_eswitch_enable_sriov(dev->priv.eswitch, cur_vfs,
+ SRIOV_LEGACY);
#endif
enable_vfs(dev, cur_vfs);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
index 04bc522605a0..c07f4d01b70e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/srq.c
@@ -63,12 +63,12 @@ void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
complete(&srq->free);
}
-static int get_pas_size(void *srqc)
+static int get_pas_size(struct mlx5_srq_attr *in)
{
- u32 log_page_size = MLX5_GET(srqc, srqc, log_page_size) + 12;
- u32 log_srq_size = MLX5_GET(srqc, srqc, log_srq_size);
- u32 log_rq_stride = MLX5_GET(srqc, srqc, log_rq_stride);
- u32 page_offset = MLX5_GET(srqc, srqc, page_offset);
+ u32 log_page_size = in->log_page_size + 12;
+ u32 log_srq_size = in->log_size;
+ u32 log_rq_stride = in->wqe_shift;
+ u32 page_offset = in->page_offset;
u32 po_quanta = 1 << (log_page_size - 6);
u32 rq_sz = 1 << (log_srq_size + 4 + log_rq_stride);
u32 page_size = 1 << log_page_size;
@@ -78,57 +78,58 @@ static int get_pas_size(void *srqc)
return rq_num_pas * sizeof(u64);
}
-static void rmpc_srqc_reformat(void *srqc, void *rmpc, bool srqc_to_rmpc)
+static void set_wq(void *wq, struct mlx5_srq_attr *in)
{
- void *wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
-
- if (srqc_to_rmpc) {
- switch (MLX5_GET(srqc, srqc, state)) {
- case MLX5_SRQC_STATE_GOOD:
- MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
- break;
- case MLX5_SRQC_STATE_ERROR:
- MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_ERR);
- break;
- default:
- pr_warn("%s: %d: Unknown srq state = 0x%x\n", __func__,
- __LINE__, MLX5_GET(srqc, srqc, state));
- MLX5_SET(rmpc, rmpc, state, MLX5_GET(srqc, srqc, state));
- }
-
- MLX5_SET(wq, wq, wq_signature, MLX5_GET(srqc, srqc, wq_signature));
- MLX5_SET(wq, wq, log_wq_pg_sz, MLX5_GET(srqc, srqc, log_page_size));
- MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(srqc, srqc, log_rq_stride) + 4);
- MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(srqc, srqc, log_srq_size));
- MLX5_SET(wq, wq, page_offset, MLX5_GET(srqc, srqc, page_offset));
- MLX5_SET(wq, wq, lwm, MLX5_GET(srqc, srqc, lwm));
- MLX5_SET(wq, wq, pd, MLX5_GET(srqc, srqc, pd));
- MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(srqc, srqc, dbr_addr));
- } else {
- switch (MLX5_GET(rmpc, rmpc, state)) {
- case MLX5_RMPC_STATE_RDY:
- MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_GOOD);
- break;
- case MLX5_RMPC_STATE_ERR:
- MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_ERROR);
- break;
- default:
- pr_warn("%s: %d: Unknown rmp state = 0x%x\n",
- __func__, __LINE__,
- MLX5_GET(rmpc, rmpc, state));
- MLX5_SET(srqc, srqc, state,
- MLX5_GET(rmpc, rmpc, state));
- }
-
- MLX5_SET(srqc, srqc, wq_signature, MLX5_GET(wq, wq, wq_signature));
- MLX5_SET(srqc, srqc, log_page_size, MLX5_GET(wq, wq, log_wq_pg_sz));
- MLX5_SET(srqc, srqc, log_rq_stride, MLX5_GET(wq, wq, log_wq_stride) - 4);
- MLX5_SET(srqc, srqc, log_srq_size, MLX5_GET(wq, wq, log_wq_sz));
- MLX5_SET(srqc, srqc, page_offset, MLX5_GET(wq, wq, page_offset));
- MLX5_SET(srqc, srqc, lwm, MLX5_GET(wq, wq, lwm));
- MLX5_SET(srqc, srqc, pd, MLX5_GET(wq, wq, pd));
- MLX5_SET64(srqc, srqc, dbr_addr, MLX5_GET64(wq, wq, dbr_addr));
- }
+ MLX5_SET(wq, wq, wq_signature, !!(in->flags
+ & MLX5_SRQ_FLAG_WQ_SIG));
+ MLX5_SET(wq, wq, log_wq_pg_sz, in->log_page_size);
+ MLX5_SET(wq, wq, log_wq_stride, in->wqe_shift + 4);
+ MLX5_SET(wq, wq, log_wq_sz, in->log_size);
+ MLX5_SET(wq, wq, page_offset, in->page_offset);
+ MLX5_SET(wq, wq, lwm, in->lwm);
+ MLX5_SET(wq, wq, pd, in->pd);
+ MLX5_SET64(wq, wq, dbr_addr, in->db_record);
+}
+
+static void set_srqc(void *srqc, struct mlx5_srq_attr *in)
+{
+ MLX5_SET(srqc, srqc, wq_signature, !!(in->flags
+ & MLX5_SRQ_FLAG_WQ_SIG));
+ MLX5_SET(srqc, srqc, log_page_size, in->log_page_size);
+ MLX5_SET(srqc, srqc, log_rq_stride, in->wqe_shift);
+ MLX5_SET(srqc, srqc, log_srq_size, in->log_size);
+ MLX5_SET(srqc, srqc, page_offset, in->page_offset);
+ MLX5_SET(srqc, srqc, lwm, in->lwm);
+ MLX5_SET(srqc, srqc, pd, in->pd);
+ MLX5_SET64(srqc, srqc, dbr_addr, in->db_record);
+ MLX5_SET(srqc, srqc, xrcd, in->xrcd);
+ MLX5_SET(srqc, srqc, cqn, in->cqn);
+}
+
+static void get_wq(void *wq, struct mlx5_srq_attr *in)
+{
+ if (MLX5_GET(wq, wq, wq_signature))
+ in->flags |= MLX5_SRQ_FLAG_WQ_SIG;
+ in->log_page_size = MLX5_GET(wq, wq, log_wq_pg_sz);
+ in->wqe_shift = MLX5_GET(wq, wq, log_wq_stride) - 4;
+ in->log_size = MLX5_GET(wq, wq, log_wq_sz);
+ in->page_offset = MLX5_GET(wq, wq, page_offset);
+ in->lwm = MLX5_GET(wq, wq, lwm);
+ in->pd = MLX5_GET(wq, wq, pd);
+ in->db_record = MLX5_GET64(wq, wq, dbr_addr);
+}
+
+static void get_srqc(void *srqc, struct mlx5_srq_attr *in)
+{
+ if (MLX5_GET(srqc, srqc, wq_signature))
+ in->flags |= MLX5_SRQ_FLAG_WQ_SIG;
+ in->log_page_size = MLX5_GET(srqc, srqc, log_page_size);
+ in->wqe_shift = MLX5_GET(srqc, srqc, log_rq_stride);
+ in->log_size = MLX5_GET(srqc, srqc, log_srq_size);
+ in->page_offset = MLX5_GET(srqc, srqc, page_offset);
+ in->lwm = MLX5_GET(srqc, srqc, lwm);
+ in->pd = MLX5_GET(srqc, srqc, pd);
+ in->db_record = MLX5_GET64(srqc, srqc, dbr_addr);
}
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
@@ -149,19 +150,36 @@ struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
EXPORT_SYMBOL(mlx5_core_get_srq);
static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_create_srq_mbox_in *in, int inlen)
+ struct mlx5_srq_attr *in)
{
- struct mlx5_create_srq_mbox_out out;
+ u32 create_out[MLX5_ST_SZ_DW(create_srq_out)] = {0};
+ void *create_in;
+ void *srqc;
+ void *pas;
+ int pas_size;
+ int inlen;
int err;
- memset(&out, 0, sizeof(out));
+ pas_size = get_pas_size(in);
+ inlen = MLX5_ST_SZ_BYTES(create_srq_in) + pas_size;
+ create_in = mlx5_vzalloc(inlen);
+ if (!create_in)
+ return -ENOMEM;
+
+ srqc = MLX5_ADDR_OF(create_srq_in, create_in, srq_context_entry);
+ pas = MLX5_ADDR_OF(create_srq_in, create_in, pas);
- in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_SRQ);
+ set_srqc(srqc, in);
+ memcpy(pas, in->pas, pas_size);
- err = mlx5_cmd_exec_check_status(dev, (u32 *)in, inlen, (u32 *)(&out),
- sizeof(out));
+ MLX5_SET(create_srq_in, create_in, opcode,
+ MLX5_CMD_OP_CREATE_SRQ);
- srq->srqn = be32_to_cpu(out.srqn) & 0xffffff;
+ err = mlx5_cmd_exec_check_status(dev, create_in, inlen, create_out,
+ sizeof(create_out));
+ kvfree(create_in);
+ if (!err)
+ srq->srqn = MLX5_GET(create_srq_out, create_out, srqn);
return err;
}
@@ -169,67 +187,75 @@ static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
static int destroy_srq_cmd(struct mlx5_core_dev *dev,
struct mlx5_core_srq *srq)
{
- struct mlx5_destroy_srq_mbox_in in;
- struct mlx5_destroy_srq_mbox_out out;
+ u32 srq_in[MLX5_ST_SZ_DW(destroy_srq_in)] = {0};
+ u32 srq_out[MLX5_ST_SZ_DW(destroy_srq_out)] = {0};
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
- in.srqn = cpu_to_be32(srq->srqn);
+ MLX5_SET(destroy_srq_in, srq_in, opcode,
+ MLX5_CMD_OP_DESTROY_SRQ);
+ MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn);
- return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in),
- (u32 *)(&out), sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in),
+ srq_out, sizeof(srq_out));
}
static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
u16 lwm, int is_srq)
{
- struct mlx5_arm_srq_mbox_in in;
- struct mlx5_arm_srq_mbox_out out;
-
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
+ /* arm_srq structs are missing; use the identical xrc_srq ones */
+ u32 srq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
+ u32 srq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ);
- in.hdr.opmod = cpu_to_be16(!!is_srq);
- in.srqn = cpu_to_be32(srq->srqn);
- in.lwm = cpu_to_be16(lwm);
+ MLX5_SET(arm_xrc_srq_in, srq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
+ MLX5_SET(arm_xrc_srq_in, srq_in, xrc_srqn, srq->srqn);
+ MLX5_SET(arm_xrc_srq_in, srq_in, lwm, lwm);
- return mlx5_cmd_exec_check_status(dev, (u32 *)(&in),
- sizeof(in), (u32 *)(&out),
- sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in),
+ srq_out, sizeof(srq_out));
}
static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_query_srq_mbox_out *out)
+ struct mlx5_srq_attr *out)
{
- struct mlx5_query_srq_mbox_in in;
+ u32 srq_in[MLX5_ST_SZ_DW(query_srq_in)] = {0};
+ u32 *srq_out;
+ void *srqc;
+ int err;
- memset(&in, 0, sizeof(in));
+ srq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_srq_out));
+ if (!srq_out)
+ return -ENOMEM;
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ);
- in.srqn = cpu_to_be32(srq->srqn);
+ MLX5_SET(query_srq_in, srq_in, opcode,
+ MLX5_CMD_OP_QUERY_SRQ);
+ MLX5_SET(query_srq_in, srq_in, srqn, srq->srqn);
+ err = mlx5_cmd_exec_check_status(dev, srq_in, sizeof(srq_in),
+ srq_out,
+ MLX5_ST_SZ_BYTES(query_srq_out));
+ if (err)
+ goto out;
- return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in),
- (u32 *)out, sizeof(*out));
+ srqc = MLX5_ADDR_OF(query_srq_out, srq_out, srq_context_entry);
+ get_srqc(srqc, out);
+ if (MLX5_GET(srqc, srqc, state) != MLX5_SRQC_STATE_GOOD)
+ out->flags |= MLX5_SRQ_FLAG_ERR;
+out:
+ kvfree(srq_out);
+ return err;
}
static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
struct mlx5_core_srq *srq,
- struct mlx5_create_srq_mbox_in *in,
- int srq_inlen)
+ struct mlx5_srq_attr *in)
{
u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
void *create_in;
- void *srqc;
void *xrc_srqc;
void *pas;
int pas_size;
int inlen;
int err;
- srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry);
- pas_size = get_pas_size(srqc);
+ pas_size = get_pas_size(in);
inlen = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
create_in = mlx5_vzalloc(inlen);
if (!create_in)
@@ -239,7 +265,8 @@ static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
xrc_srq_context_entry);
pas = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);
- memcpy(xrc_srqc, srqc, MLX5_ST_SZ_BYTES(srqc));
+ set_srqc(xrc_srqc, in);
+ MLX5_SET(xrc_srqc, xrc_srqc, user_index, in->user_index);
memcpy(pas, in->pas, pas_size);
MLX5_SET(create_xrc_srq_in, create_in, opcode,
MLX5_CMD_OP_CREATE_XRC_SRQ);
@@ -293,11 +320,10 @@ static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
struct mlx5_core_srq *srq,
- struct mlx5_query_srq_mbox_out *out)
+ struct mlx5_srq_attr *out)
{
u32 xrcsrq_in[MLX5_ST_SZ_DW(query_xrc_srq_in)];
u32 *xrcsrq_out;
- void *srqc;
void *xrc_srqc;
int err;
@@ -317,8 +343,9 @@ static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out,
xrc_srq_context_entry);
- srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
- memcpy(srqc, xrc_srqc, MLX5_ST_SZ_BYTES(srqc));
+ get_srqc(xrc_srqc, out);
+ if (MLX5_GET(xrc_srqc, xrc_srqc, state) != MLX5_XRC_SRQC_STATE_GOOD)
+ out->flags |= MLX5_SRQ_FLAG_ERR;
out:
kvfree(xrcsrq_out);
@@ -326,26 +353,27 @@ out:
}
static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_create_srq_mbox_in *in, int srq_inlen)
+ struct mlx5_srq_attr *in)
{
void *create_in;
void *rmpc;
- void *srqc;
+ void *wq;
int pas_size;
int inlen;
int err;
- srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry);
- pas_size = get_pas_size(srqc);
+ pas_size = get_pas_size(in);
inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
create_in = mlx5_vzalloc(inlen);
if (!create_in)
return -ENOMEM;
rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
+ wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
+ MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
+ set_wq(wq, in);
memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);
- rmpc_srqc_reformat(srqc, rmpc, true);
err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn);
@@ -390,11 +418,10 @@ static int arm_rmp_cmd(struct mlx5_core_dev *dev,
}
static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_query_srq_mbox_out *out)
+ struct mlx5_srq_attr *out)
{
u32 *rmp_out;
void *rmpc;
- void *srqc;
int err;
rmp_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rmp_out));
@@ -405,9 +432,10 @@ static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
if (err)
goto out;
- srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
- rmpc_srqc_reformat(srqc, rmpc, false);
+ get_wq(MLX5_ADDR_OF(rmpc, rmpc, wq), out);
+ if (MLX5_GET(rmpc, rmpc, state) != MLX5_RMPC_STATE_RDY)
+ out->flags |= MLX5_SRQ_FLAG_ERR;
out:
kvfree(rmp_out);
@@ -416,15 +444,14 @@ out:
static int create_srq_split(struct mlx5_core_dev *dev,
struct mlx5_core_srq *srq,
- struct mlx5_create_srq_mbox_in *in,
- int inlen, int is_xrc)
+ struct mlx5_srq_attr *in)
{
if (!dev->issi)
- return create_srq_cmd(dev, srq, in, inlen);
+ return create_srq_cmd(dev, srq, in);
else if (srq->common.res == MLX5_RES_XSRQ)
- return create_xrc_srq_cmd(dev, srq, in, inlen);
+ return create_xrc_srq_cmd(dev, srq, in);
else
- return create_rmp_cmd(dev, srq, in, inlen);
+ return create_rmp_cmd(dev, srq, in);
}
static int destroy_srq_split(struct mlx5_core_dev *dev,
@@ -439,15 +466,17 @@ static int destroy_srq_split(struct mlx5_core_dev *dev,
}
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_create_srq_mbox_in *in, int inlen,
- int is_xrc)
+ struct mlx5_srq_attr *in)
{
int err;
struct mlx5_srq_table *table = &dev->priv.srq_table;
- srq->common.res = is_xrc ? MLX5_RES_XSRQ : MLX5_RES_SRQ;
+ if (in->type == IB_SRQT_XRC)
+ srq->common.res = MLX5_RES_XSRQ;
+ else
+ srq->common.res = MLX5_RES_SRQ;
- err = create_srq_split(dev, srq, in, inlen, is_xrc);
+ err = create_srq_split(dev, srq, in);
if (err)
return err;
@@ -502,7 +531,7 @@ int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
EXPORT_SYMBOL(mlx5_core_destroy_srq);
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
- struct mlx5_query_srq_mbox_out *out)
+ struct mlx5_srq_attr *out)
{
if (!dev->issi)
return query_srq_cmd(dev, srq, out);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
index 03a5093ffeb7..28274a6fbafe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -85,6 +85,7 @@ int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn)
return err;
}
+EXPORT_SYMBOL(mlx5_core_create_rq);
int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen)
{
@@ -110,6 +111,7 @@ void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn)
mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
}
+EXPORT_SYMBOL(mlx5_core_destroy_rq);
int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out)
{
@@ -430,6 +432,7 @@ int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
return err;
}
+EXPORT_SYMBOL(mlx5_core_create_rqt);
int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
int inlen)
@@ -455,3 +458,4 @@ void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn)
mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
}
+EXPORT_SYMBOL(mlx5_core_destroy_rqt);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index b69dadcfb897..21365d06982b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -135,6 +135,18 @@ static int mlx5_modify_nic_vport_context(struct mlx5_core_dev *mdev, void *in,
return mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
}
+void mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
+ u8 *min_inline_mode)
+{
+ u32 out[MLX5_ST_SZ_DW(query_nic_vport_context_out)] = {0};
+
+ mlx5_query_nic_vport_context(mdev, 0, out, sizeof(out));
+
+ *min_inline_mode = MLX5_GET(query_nic_vport_context_out, out,
+ nic_vport_context.min_wqe_inline_mode);
+}
+EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);
+
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
u16 vport, u8 *addr)
{
@@ -508,6 +520,41 @@ int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid);
+int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
+ u32 vport, u64 node_guid)
+{
+ int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+ void *nic_vport_context;
+ void *in;
+ int err;
+
+ if (!vport)
+ return -EINVAL;
+ if (!MLX5_CAP_GEN(mdev, vport_group_manager))
+ return -EACCES;
+ if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
+ return -ENOTSUPP;
+
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_nic_vport_context_in, in,
+ field_select.node_guid, 1);
+ MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
+ MLX5_SET(modify_nic_vport_context_in, in, other_vport, !!vport);
+
+ nic_vport_context = MLX5_ADDR_OF(modify_nic_vport_context_in,
+ in, nic_vport_context);
+ MLX5_SET64(nic_vport_context, nic_vport_context, node_guid, node_guid);
+
+ err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+
+ kvfree(in);
+
+ return err;
+}
+
int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
u16 *qkey_viol_cntr)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
index f2fd1ef16da7..e25a73ed2981 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.c
@@ -72,8 +72,8 @@ static int mlx5e_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port)
u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)];
u32 out[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_out)];
- memset(&in, 0, sizeof(in));
- memset(&out, 0, sizeof(out));
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
MLX5_SET(delete_vxlan_udp_dport_in, in, opcode,
MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
@@ -105,6 +105,9 @@ static void mlx5e_vxlan_add_port(struct work_struct *work)
struct mlx5e_vxlan *vxlan;
int err;
+ if (mlx5e_vxlan_lookup_port(priv, port))
+ goto free_work;
+
if (mlx5e_vxlan_core_add_port_cmd(priv->mdev, port))
goto free_work;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
index 217ac530a514..5def12c048e3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vxlan.h
@@ -48,18 +48,12 @@ struct mlx5e_vxlan_work {
static inline bool mlx5e_vxlan_allowed(struct mlx5_core_dev *mdev)
{
- return IS_ENABLED(CONFIG_MLX5_CORE_EN_VXLAN) &&
- (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) &&
+ return (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) &&
mlx5_core_is_pf(mdev));
}
-#ifdef CONFIG_MLX5_CORE_EN_VXLAN
void mlx5e_vxlan_init(struct mlx5e_priv *priv);
void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv);
-#else
-static inline void mlx5e_vxlan_init(struct mlx5e_priv *priv) {}
-static inline void mlx5e_vxlan_cleanup(struct mlx5e_priv *priv) {}
-#endif
void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, sa_family_t sa_family,
u16 port, int add);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index ce21ee5b2357..821a087c7ae2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -75,14 +75,14 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
- mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+ mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
return err;
}
err = mlx5_buf_alloc_node(mdev, mlx5_wq_cyc_get_byte_size(wq),
&wq_ctrl->buf, param->buf_numa_node);
if (err) {
- mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+ mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
goto err_db_free;
}
@@ -111,14 +111,14 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
- mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+ mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
return err;
}
err = mlx5_buf_alloc_node(mdev, mlx5_cqwq_get_byte_size(wq),
&wq_ctrl->buf, param->buf_numa_node);
if (err) {
- mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+ mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
goto err_db_free;
}
@@ -148,13 +148,14 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
- mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+ mlx5_core_warn(mdev, "mlx5_db_alloc_node() failed, %d\n", err);
return err;
}
- err = mlx5_buf_alloc(mdev, mlx5_wq_ll_get_byte_size(wq), &wq_ctrl->buf);
+ err = mlx5_buf_alloc_node(mdev, mlx5_wq_ll_get_byte_size(wq),
+ &wq_ctrl->buf, param->buf_numa_node);
if (err) {
- mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+ mlx5_core_warn(mdev, "mlx5_buf_alloc_node() failed, %d\n", err);
goto err_db_free;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index 2ad7f67854d5..5989f7cb5462 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -50,3 +50,11 @@ config MLXSW_SPECTRUM
To compile this driver as a module, choose M here: the
module will be called mlxsw_spectrum.
+
+config MLXSW_SPECTRUM_DCB
+ bool "Data Center Bridging (DCB) support"
+ depends on MLXSW_SPECTRUM && DCB
+ default y
+ ---help---
+ Say Y here if you want to use Data Center Bridging (DCB) in the
+ driver.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 584cac444852..d20ae1838a64 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -7,4 +7,6 @@ obj-$(CONFIG_MLXSW_SWITCHX2) += mlxsw_switchx2.o
mlxsw_switchx2-objs := switchx2.o
obj-$(CONFIG_MLXSW_SPECTRUM) += mlxsw_spectrum.o
mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
- spectrum_switchdev.o
+ spectrum_switchdev.o spectrum_router.o \
+ spectrum_kvdl.o
+mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/cmd.h b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
index cd63b8263688..28271bedd957 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/cmd.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/cmd.h
@@ -105,6 +105,7 @@ enum mlxsw_cmd_opcode {
MLXSW_CMD_OPCODE_SW2HW_EQ = 0x013,
MLXSW_CMD_OPCODE_HW2SW_EQ = 0x014,
MLXSW_CMD_OPCODE_QUERY_EQ = 0x015,
+ MLXSW_CMD_OPCODE_QUERY_RESOURCES = 0x101,
};
static inline const char *mlxsw_cmd_opcode_str(u16 opcode)
@@ -144,6 +145,8 @@ static inline const char *mlxsw_cmd_opcode_str(u16 opcode)
return "HW2SW_EQ";
case MLXSW_CMD_OPCODE_QUERY_EQ:
return "QUERY_EQ";
+ case MLXSW_CMD_OPCODE_QUERY_RESOURCES:
+ return "QUERY_RESOURCES";
default:
return "*UNKNOWN*";
}
@@ -500,6 +503,35 @@ static inline int mlxsw_cmd_unmap_fa(struct mlxsw_core *mlxsw_core)
return mlxsw_cmd_exec_none(mlxsw_core, MLXSW_CMD_OPCODE_UNMAP_FA, 0, 0);
}
+/* QUERY_RESOURCES - Query chip resources
+ * --------------------------------------
+ * OpMod == 0 (N/A), INMmod is index
+ * ----------------------------------
+ * The QUERY_RESOURCES command retrieves information related to chip resources
+ * by resource ID. Every command returns 32 entries; INMmod is used as the
+ * base index, so, for example, index 1 returns entries 32-63. When the table
+ * ends and there are no more resources in it, resource ID 0xFFFF is returned
+ * to indicate this.
+ */
+static inline int mlxsw_cmd_query_resources(struct mlxsw_core *mlxsw_core,
+ char *out_mbox, int index)
+{
+ return mlxsw_cmd_exec_out(mlxsw_core, MLXSW_CMD_OPCODE_QUERY_RESOURCES,
+ 0, index, false, out_mbox,
+ MLXSW_CMD_MBOX_SIZE);
+}
+
+/* cmd_mbox_query_resource_id
+ * The resource ID. 0xFFFF indicates the table's end.
+ */
+MLXSW_ITEM32_INDEXED(cmd_mbox, query_resource, id, 0x00, 16, 16, 0x8, 0, false);
+
+/* cmd_mbox_query_resource_data
+ * The resource value.
+ */
+MLXSW_ITEM64_INDEXED(cmd_mbox, query_resource, data,
+ 0x00, 0, 40, 0x8, 0, false);
+
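/*
 * Illustrative sketch (not part of the patch itself): how a bus driver can
 * walk the resource table with the accessors above. The bounds mirror the
 * MLXSW_RESOURCES_QUERY_MAX_QUERIES/MLXSW_RESOURCES_PER_QUERY defines the
 * patch adds to pci.c further down.
 */
static int example_walk_resources(struct mlxsw_core *core, char *mbox)
{
	int index, i, err;
	u64 val;
	u16 id;

	for (index = 0; index < 100; index++) {
		err = mlxsw_cmd_query_resources(core, mbox, index);
		if (err)
			return err;
		for (i = 0; i < 32; i++) {
			id = mlxsw_cmd_mbox_query_resource_id_get(mbox, i);
			val = mlxsw_cmd_mbox_query_resource_data_get(mbox, i);
			if (id == 0xffff)	/* end of table */
				return 0;
			/* consume (id, val) here */
		}
	}
	return -EIO;	/* FW never terminated the table */
}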
/* CONFIG_PROFILE (Set) - Configure Switch Profile
* ------------------------------
* OpMod == 1 (Set), INMmod == 0 (N/A)
@@ -607,6 +639,24 @@ MLXSW_ITEM32(cmd_mbox, config_profile,
*/
MLXSW_ITEM32(cmd_mbox, config_profile, set_ar_sec, 0x0C, 15, 1);
+/* cmd_mbox_config_set_kvd_linear_size
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_linear_size, 0x0C, 24, 1);
+
+/* cmd_mbox_config_set_kvd_hash_single_size
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_single_size, 0x0C, 25, 1);
+
+/* cmd_mbox_config_set_kvd_hash_double_size
+ * Capability bit. Setting a bit to 1 configures the profile
+ * according to the mailbox contents.
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, set_kvd_hash_double_size, 0x0C, 26, 1);
+
/* cmd_mbox_config_profile_max_vepa_channels
* Maximum number of VEPA channels per port (0 through 16)
* 0 - multi-channel VEPA is disabled
@@ -733,6 +783,31 @@ MLXSW_ITEM32(cmd_mbox, config_profile, adaptive_routing_group_cap, 0x4C, 0, 16);
*/
MLXSW_ITEM32(cmd_mbox, config_profile, arn, 0x50, 31, 1);
+/* cmd_mbox_config_kvd_linear_size
+ * KVD Linear Size
+ * Valid for Spectrum only
+ * Allowed values are 128*N where N=0 or higher
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, kvd_linear_size, 0x54, 0, 24);
+
+/* cmd_mbox_config_kvd_hash_single_size
+ * KVD Hash single-entries size
+ * Valid for Spectrum only
+ * Allowed values are 128*N where N=0 or higher
+ * Must be greater or equal to cap_min_kvd_hash_single_size
+ * Must be smaller or equal to cap_kvd_size - kvd_linear_size
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, kvd_hash_single_size, 0x58, 0, 24);
+
+/* cmd_mbox_config_kvd_hash_double_size
+ * KVD Hash double-entries size (units of single-size entries)
+ * Valid for Spectrum only
+ * Allowed values are 128*N where N=0 or higher
+ * Must be either 0 or greater or equal to cap_min_kvd_hash_double_size
+ * Must be smaller or equal to cap_kvd_size - kvd_linear_size
+ */
+MLXSW_ITEM32(cmd_mbox, config_profile, kvd_hash_double_size, 0x5C, 0, 24);
+
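/*
 * Sketch only: the documented KVD partitioning constraints expressed in
 * code. 'cap_kvd_size' stands in for the device capability and is an
 * assumption here, not a field defined by this patch.
 */
static bool example_kvd_sizes_valid(u32 linear, u32 single, u32 dbl,
				    u32 cap_kvd_size)
{
	if (linear % 128 || single % 128 || dbl % 128)
		return false;	/* all sizes must be 128*N */
	if (single > cap_kvd_size - linear)
		return false;	/* single <= cap_kvd_size - kvd_linear_size */
	if (dbl > cap_kvd_size - linear)
		return false;	/* double <= cap_kvd_size - kvd_linear_size */
	return true;
}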
/* cmd_mbox_config_profile_swid_config_mask
 * Modify Switch Partition Configuration mask. When set, the configuration
 * values for the Switch Partition are taken from the mailbox.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index f69f6280519f..068ee65a960b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -44,7 +44,7 @@
#include <linux/seq_file.h>
#include <linux/u64_stats_sync.h>
#include <linux/netdevice.h>
-#include <linux/wait.h>
+#include <linux/completion.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/types.h>
@@ -55,8 +55,10 @@
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
+#include <linux/workqueue.h>
#include <asm/byteorder.h>
#include <net/devlink.h>
+#include <trace/events/devlink.h>
#include "core.h"
#include "item.h"
@@ -73,6 +75,8 @@ static const char mlxsw_core_driver_name[] = "mlxsw_core";
static struct dentry *mlxsw_core_dbg_root;
+static struct workqueue_struct *mlxsw_wq;
+
struct mlxsw_core_pcpu_stats {
u64 trap_rx_packets[MLXSW_TRAP_ID_MAX];
u64 trap_rx_bytes[MLXSW_TRAP_ID_MAX];
@@ -93,11 +97,9 @@ struct mlxsw_core {
struct list_head rx_listener_list;
struct list_head event_listener_list;
struct {
- struct sk_buff *resp_skb;
- u64 tid;
- wait_queue_head_t wait;
- bool trans_active;
- struct mutex lock; /* One EMAD transaction at a time. */
+ atomic64_t tid;
+ struct list_head trans_list;
+ spinlock_t trans_list_lock; /* protects trans_list writes */
bool use_emad;
} emad;
struct mlxsw_core_pcpu_stats __percpu *pcpu_stats;
@@ -109,11 +111,18 @@ struct mlxsw_core {
struct {
u8 *mapping; /* lag_id+port_index to local_port mapping */
} lag;
+ struct mlxsw_resources resources;
struct mlxsw_hwmon *hwmon;
unsigned long driver_priv[0];
/* driver_priv has to be always the last item */
};
+void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
+{
+ return mlxsw_core->driver_priv;
+}
+EXPORT_SYMBOL(mlxsw_core_driver_priv);
+
struct mlxsw_rx_listener_item {
struct list_head list;
struct mlxsw_rx_listener rxl;
@@ -284,7 +293,7 @@ static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
static void mlxsw_emad_pack_op_tlv(char *op_tlv,
const struct mlxsw_reg_info *reg,
enum mlxsw_core_reg_access_type type,
- struct mlxsw_core *mlxsw_core)
+ u64 tid)
{
mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
@@ -300,7 +309,7 @@ static void mlxsw_emad_pack_op_tlv(char *op_tlv,
MLXSW_EMAD_OP_TLV_METHOD_WRITE);
mlxsw_emad_op_tlv_class_set(op_tlv,
MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
- mlxsw_emad_op_tlv_tid_set(op_tlv, mlxsw_core->emad.tid);
+ mlxsw_emad_op_tlv_tid_set(op_tlv, tid);
}
static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
@@ -322,7 +331,7 @@ static void mlxsw_emad_construct(struct sk_buff *skb,
const struct mlxsw_reg_info *reg,
char *payload,
enum mlxsw_core_reg_access_type type,
- struct mlxsw_core *mlxsw_core)
+ u64 tid)
{
char *buf;
@@ -333,7 +342,7 @@ static void mlxsw_emad_construct(struct sk_buff *skb,
mlxsw_emad_pack_reg_tlv(buf, reg, payload);
buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
- mlxsw_emad_pack_op_tlv(buf, reg, type, mlxsw_core);
+ mlxsw_emad_pack_op_tlv(buf, reg, type, tid);
mlxsw_emad_construct_eth_hdr(skb);
}
@@ -370,58 +379,16 @@ static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
}
-#define MLXSW_EMAD_TIMEOUT_MS 200
-
-static int __mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
-{
- int err;
- int ret;
-
- mlxsw_core->emad.trans_active = true;
-
- err = mlxsw_core_skb_transmit(mlxsw_core->driver_priv, skb, tx_info);
- if (err) {
- dev_err(mlxsw_core->bus_info->dev, "Failed to transmit EMAD (tid=%llx)\n",
- mlxsw_core->emad.tid);
- dev_kfree_skb(skb);
- goto trans_inactive_out;
- }
-
- ret = wait_event_timeout(mlxsw_core->emad.wait,
- !(mlxsw_core->emad.trans_active),
- msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS));
- if (!ret) {
- dev_warn(mlxsw_core->bus_info->dev, "EMAD timed-out (tid=%llx)\n",
- mlxsw_core->emad.tid);
- err = -EIO;
- goto trans_inactive_out;
- }
-
- return 0;
-
-trans_inactive_out:
- mlxsw_core->emad.trans_active = false;
- return err;
-}
-
-static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core,
- char *op_tlv)
+static int mlxsw_emad_process_status(char *op_tlv,
+ enum mlxsw_emad_op_tlv_status *p_status)
{
- enum mlxsw_emad_op_tlv_status status;
- u64 tid;
-
- status = mlxsw_emad_op_tlv_status_get(op_tlv);
- tid = mlxsw_emad_op_tlv_tid_get(op_tlv);
+ *p_status = mlxsw_emad_op_tlv_status_get(op_tlv);
- switch (status) {
+ switch (*p_status) {
case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
return 0;
case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
- dev_warn(mlxsw_core->bus_info->dev, "Reg access status again (tid=%llx,status=%x(%s))\n",
- tid, status, mlxsw_emad_op_tlv_status_str(status));
return -EAGAIN;
case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
@@ -432,70 +399,157 @@ static int mlxsw_emad_process_status(struct mlxsw_core *mlxsw_core,
case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
default:
- dev_err(mlxsw_core->bus_info->dev, "Reg access status failed (tid=%llx,status=%x(%s))\n",
- tid, status, mlxsw_emad_op_tlv_status_str(status));
return -EIO;
}
}
-static int mlxsw_emad_process_status_skb(struct mlxsw_core *mlxsw_core,
- struct sk_buff *skb)
+static int
+mlxsw_emad_process_status_skb(struct sk_buff *skb,
+ enum mlxsw_emad_op_tlv_status *p_status)
+{
+ return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status);
+}
+
+struct mlxsw_reg_trans {
+ struct list_head list;
+ struct list_head bulk_list;
+ struct mlxsw_core *core;
+ struct sk_buff *tx_skb;
+ struct mlxsw_tx_info tx_info;
+ struct delayed_work timeout_dw;
+ unsigned int retries;
+ u64 tid;
+ struct completion completion;
+ atomic_t active;
+ mlxsw_reg_trans_cb_t *cb;
+ unsigned long cb_priv;
+ const struct mlxsw_reg_info *reg;
+ enum mlxsw_core_reg_access_type type;
+ int err;
+ enum mlxsw_emad_op_tlv_status emad_status;
+ struct rcu_head rcu;
+};
+
+#define MLXSW_EMAD_TIMEOUT_MS 200
+
+static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
{
- return mlxsw_emad_process_status(mlxsw_core, mlxsw_emad_op_tlv(skb));
+ unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);
+
+ mlxsw_core_schedule_dw(&trans->timeout_dw, timeout);
}
static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
- struct sk_buff *skb,
- const struct mlxsw_tx_info *tx_info)
+ struct mlxsw_reg_trans *trans)
{
- struct sk_buff *trans_skb;
- int n_retry;
+ struct sk_buff *skb;
int err;
- n_retry = 0;
-retry:
- /* We copy the EMAD to a new skb, since we might need
- * to retransmit it in case of failure.
- */
- trans_skb = skb_copy(skb, GFP_KERNEL);
- if (!trans_skb) {
- err = -ENOMEM;
- goto out;
+ skb = skb_copy(trans->tx_skb, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0,
+ skb->data + mlxsw_core->driver->txhdr_len,
+ skb->len - mlxsw_core->driver->txhdr_len);
+
+ atomic_set(&trans->active, 1);
+ err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
+ if (err) {
+ dev_kfree_skb(skb);
+ return err;
}
+ mlxsw_emad_trans_timeout_schedule(trans);
+ return 0;
+}
- err = __mlxsw_emad_transmit(mlxsw_core, trans_skb, tx_info);
- if (!err) {
- struct sk_buff *resp_skb = mlxsw_core->emad.resp_skb;
+static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err)
+{
+ struct mlxsw_core *mlxsw_core = trans->core;
+
+ dev_kfree_skb(trans->tx_skb);
+ spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
+ list_del_rcu(&trans->list);
+ spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
+ trans->err = err;
+ complete(&trans->completion);
+}
+
+static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_reg_trans *trans)
+{
+ int err;
- err = mlxsw_emad_process_status_skb(mlxsw_core, resp_skb);
- if (err)
- dev_kfree_skb(resp_skb);
- if (!err || err != -EAGAIN)
- goto out;
+ if (trans->retries < MLXSW_EMAD_MAX_RETRY) {
+ trans->retries++;
+ err = mlxsw_emad_transmit(trans->core, trans);
+ if (err == 0)
+ return;
+ } else {
+ err = -EIO;
}
- if (n_retry++ < MLXSW_EMAD_MAX_RETRY)
- goto retry;
+ mlxsw_emad_trans_finish(trans, err);
+}
-out:
- dev_kfree_skb(skb);
- mlxsw_core->emad.tid++;
- return err;
+static void mlxsw_emad_trans_timeout_work(struct work_struct *work)
+{
+ struct mlxsw_reg_trans *trans = container_of(work,
+ struct mlxsw_reg_trans,
+ timeout_dw.work);
+
+ if (!atomic_dec_and_test(&trans->active))
+ return;
+
+ mlxsw_emad_transmit_retry(trans->core, trans);
}
+static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_reg_trans *trans,
+ struct sk_buff *skb)
+{
+ int err;
+
+ if (!atomic_dec_and_test(&trans->active))
+ return;
+
+ err = mlxsw_emad_process_status_skb(skb, &trans->emad_status);
+ if (err == -EAGAIN) {
+ mlxsw_emad_transmit_retry(mlxsw_core, trans);
+ } else {
+ if (err == 0) {
+ char *op_tlv = mlxsw_emad_op_tlv(skb);
+
+ if (trans->cb)
+ trans->cb(mlxsw_core,
+ mlxsw_emad_reg_payload(op_tlv),
+ trans->reg->len, trans->cb_priv);
+ }
+ mlxsw_emad_trans_finish(trans, err);
+ }
+}
+
+/* called with rcu read lock held */
static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
void *priv)
{
struct mlxsw_core *mlxsw_core = priv;
+ struct mlxsw_reg_trans *trans;
- if (mlxsw_emad_is_resp(skb) &&
- mlxsw_core->emad.trans_active &&
- mlxsw_emad_get_tid(skb) == mlxsw_core->emad.tid) {
- mlxsw_core->emad.resp_skb = skb;
- mlxsw_core->emad.trans_active = false;
- wake_up(&mlxsw_core->emad.wait);
- } else {
- dev_kfree_skb(skb);
+ trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
+ skb->data, skb->len);
+
+ if (!mlxsw_emad_is_resp(skb))
+ goto free_skb;
+
+ list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) {
+ if (mlxsw_emad_get_tid(skb) == trans->tid) {
+ mlxsw_emad_process_response(mlxsw_core, trans, skb);
+ break;
+ }
}
+
+free_skb:
+ dev_kfree_skb(skb);
}
static const struct mlxsw_rx_listener mlxsw_emad_rx_listener = {
@@ -522,18 +576,19 @@ static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core)
static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
{
+ u64 tid;
int err;
/* Set the upper 32 bits of the transaction ID field to a random
* number. This allows us to discard EMADs addressed to other
* devices.
*/
- get_random_bytes(&mlxsw_core->emad.tid, 4);
- mlxsw_core->emad.tid = mlxsw_core->emad.tid << 32;
+ get_random_bytes(&tid, 4);
+ tid <<= 32;
+ atomic64_set(&mlxsw_core->emad.tid, tid);
- init_waitqueue_head(&mlxsw_core->emad.wait);
- mlxsw_core->emad.trans_active = false;
- mutex_init(&mlxsw_core->emad.lock);
+ INIT_LIST_HEAD(&mlxsw_core->emad.trans_list);
+ spin_lock_init(&mlxsw_core->emad.trans_list_lock);
err = mlxsw_core_rx_listener_register(mlxsw_core,
&mlxsw_emad_rx_listener,
@@ -591,6 +646,59 @@ static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
return skb;
}
+static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg,
+ char *payload,
+ enum mlxsw_core_reg_access_type type,
+ struct mlxsw_reg_trans *trans,
+ struct list_head *bulk_list,
+ mlxsw_reg_trans_cb_t *cb,
+ unsigned long cb_priv, u64 tid)
+{
+ struct sk_buff *skb;
+ int err;
+
+ dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
+ trans->tid, reg->id, mlxsw_reg_id_str(reg->id),
+ mlxsw_core_reg_access_type_str(type));
+
+ skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
+ if (!skb)
+ return -ENOMEM;
+
+ list_add_tail(&trans->bulk_list, bulk_list);
+ trans->core = mlxsw_core;
+ trans->tx_skb = skb;
+ trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
+ trans->tx_info.is_emad = true;
+ INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
+ trans->tid = tid;
+ init_completion(&trans->completion);
+ trans->cb = cb;
+ trans->cb_priv = cb_priv;
+ trans->reg = reg;
+ trans->type = type;
+
+ mlxsw_emad_construct(skb, reg, payload, type, trans->tid);
+ mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);
+
+ spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
+ list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
+ spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
+ err = mlxsw_emad_transmit(mlxsw_core, trans);
+ if (err)
+ goto err_out;
+ return 0;
+
+err_out:
+ spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
+ list_del_rcu(&trans->list);
+ spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
+ list_del(&trans->bulk_list);
+ dev_kfree_skb(trans->tx_skb);
+ return err;
+}
+
/*****************
* Core functions
*****************/
@@ -680,24 +788,6 @@ static const struct file_operations mlxsw_core_rx_stats_dbg_ops = {
.llseek = seq_lseek
};
-static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
- const char *buf, size_t size)
-{
- __be32 *m = (__be32 *) buf;
- int i;
- int count = size / sizeof(__be32);
-
- for (i = count - 1; i >= 0; i--)
- if (m[i])
- break;
- i++;
- count = i ? i : 1;
- for (i = 0; i < count; i += 4)
- dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
- i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
- be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
-}
-
int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
{
spin_lock(&mlxsw_core_driver_list_lock);
@@ -795,8 +885,7 @@ static int mlxsw_devlink_port_split(struct devlink *devlink,
return -EINVAL;
if (!mlxsw_core->driver->port_split)
return -EOPNOTSUPP;
- return mlxsw_core->driver->port_split(mlxsw_core->driver_priv,
- port_index, count);
+ return mlxsw_core->driver->port_split(mlxsw_core, port_index, count);
}
static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
@@ -808,13 +897,171 @@ static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
return -EINVAL;
if (!mlxsw_core->driver->port_unsplit)
return -EOPNOTSUPP;
- return mlxsw_core->driver->port_unsplit(mlxsw_core->driver_priv,
- port_index);
+ return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index);
+}
+
+static int
+mlxsw_devlink_sb_pool_get(struct devlink *devlink,
+ unsigned int sb_index, u16 pool_index,
+ struct devlink_sb_pool_info *pool_info)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->sb_pool_get)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index,
+ pool_index, pool_info);
+}
+
+static int
+mlxsw_devlink_sb_pool_set(struct devlink *devlink,
+ unsigned int sb_index, u16 pool_index, u32 size,
+ enum devlink_sb_threshold_type threshold_type)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->sb_pool_set)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index,
+ pool_index, size, threshold_type);
+}
+
+static void *__dl_port(struct devlink_port *devlink_port)
+{
+ return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
+}
+
+static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_threshold)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+ struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
+
+ if (!mlxsw_driver->sb_port_pool_get)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index,
+ pool_index, p_threshold);
+}
+
+static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 threshold)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+ struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
+
+ if (!mlxsw_driver->sb_port_pool_set)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index,
+ pool_index, threshold);
+}
+
+static int
+mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 *p_pool_index, u32 *p_threshold)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+ struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
+
+ if (!mlxsw_driver->sb_tc_pool_bind_get)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index,
+ tc_index, pool_type,
+ p_pool_index, p_threshold);
+}
+
+static int
+mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 pool_index, u32 threshold)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+ struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
+
+ if (!mlxsw_driver->sb_tc_pool_bind_set)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
+ tc_index, pool_type,
+ pool_index, threshold);
+}
+
+static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink,
+ unsigned int sb_index)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->sb_occ_snapshot)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index);
+}
+
+static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink,
+ unsigned int sb_index)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+
+ if (!mlxsw_driver->sb_occ_max_clear)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index);
+}
+
+static int
+mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_cur, u32 *p_max)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+ struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
+
+ if (!mlxsw_driver->sb_occ_port_pool_get)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index,
+ pool_index, p_cur, p_max);
+}
+
+static int
+mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u32 *p_cur, u32 *p_max)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
+ struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
+ struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
+
+ if (!mlxsw_driver->sb_occ_tc_port_bind_get)
+ return -EOPNOTSUPP;
+ return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port,
+ sb_index, tc_index,
+ pool_type, p_cur, p_max);
}
static const struct devlink_ops mlxsw_devlink_ops = {
- .port_split = mlxsw_devlink_port_split,
- .port_unsplit = mlxsw_devlink_port_unsplit,
+ .port_split = mlxsw_devlink_port_split,
+ .port_unsplit = mlxsw_devlink_port_unsplit,
+ .sb_pool_get = mlxsw_devlink_sb_pool_get,
+ .sb_pool_set = mlxsw_devlink_sb_pool_set,
+ .sb_port_pool_get = mlxsw_devlink_sb_port_pool_get,
+ .sb_port_pool_set = mlxsw_devlink_sb_port_pool_set,
+ .sb_tc_pool_bind_get = mlxsw_devlink_sb_tc_pool_bind_get,
+ .sb_tc_pool_bind_set = mlxsw_devlink_sb_tc_pool_bind_set,
+ .sb_occ_snapshot = mlxsw_devlink_sb_occ_snapshot,
+ .sb_occ_max_clear = mlxsw_devlink_sb_occ_max_clear,
+ .sb_occ_port_pool_get = mlxsw_devlink_sb_occ_port_pool_get,
+ .sb_occ_tc_port_bind_get = mlxsw_devlink_sb_occ_tc_port_bind_get,
};
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
@@ -864,7 +1111,8 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
}
}
- err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile);
+ err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
+ &mlxsw_core->resources);
if (err)
goto err_bus_init;
@@ -872,16 +1120,15 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_emad_init;
- err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
- if (err)
- goto err_hwmon_init;
-
err = devlink_register(devlink, mlxsw_bus_info->dev);
if (err)
goto err_devlink_register;
- err = mlxsw_driver->init(mlxsw_core->driver_priv, mlxsw_core,
- mlxsw_bus_info);
+ err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
+ if (err)
+ goto err_hwmon_init;
+
+ err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info);
if (err)
goto err_driver_init;
@@ -892,11 +1139,11 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
return 0;
err_debugfs_init:
- mlxsw_core->driver->fini(mlxsw_core->driver_priv);
+ mlxsw_core->driver->fini(mlxsw_core);
err_driver_init:
+err_hwmon_init:
devlink_unregister(devlink);
err_devlink_register:
-err_hwmon_init:
mlxsw_emad_fini(mlxsw_core);
err_emad_init:
mlxsw_bus->fini(bus_priv);
@@ -918,7 +1165,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
struct devlink *devlink = priv_to_devlink(mlxsw_core);
mlxsw_core_debugfs_fini(mlxsw_core);
- mlxsw_core->driver->fini(mlxsw_core->driver_priv);
+ mlxsw_core->driver->fini(mlxsw_core);
devlink_unregister(devlink);
mlxsw_emad_fini(mlxsw_core);
mlxsw_core->bus->fini(mlxsw_core->bus_priv);
@@ -929,26 +1176,17 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
-static struct mlxsw_core *__mlxsw_core_get(void *driver_priv)
-{
- return container_of(driver_priv, struct mlxsw_core, driver_priv);
-}
-
-bool mlxsw_core_skb_transmit_busy(void *driver_priv,
+bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
const struct mlxsw_tx_info *tx_info)
{
- struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);
-
return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);
-int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
+int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
const struct mlxsw_tx_info *tx_info)
{
- struct mlxsw_core *mlxsw_core = __mlxsw_core_get(driver_priv);
-
return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
tx_info);
}
@@ -1108,56 +1346,112 @@ void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
+static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
+{
+ return atomic64_inc_return(&mlxsw_core->emad.tid);
+}
+
static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
const struct mlxsw_reg_info *reg,
char *payload,
- enum mlxsw_core_reg_access_type type)
+ enum mlxsw_core_reg_access_type type,
+ struct list_head *bulk_list,
+ mlxsw_reg_trans_cb_t *cb,
+ unsigned long cb_priv)
{
+ u64 tid = mlxsw_core_tid_get(mlxsw_core);
+ struct mlxsw_reg_trans *trans;
int err;
- char *op_tlv;
- struct sk_buff *skb;
- struct mlxsw_tx_info tx_info = {
- .local_port = MLXSW_PORT_CPU_PORT,
- .is_emad = true,
- };
- skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
- if (!skb)
+ trans = kzalloc(sizeof(*trans), GFP_KERNEL);
+ if (!trans)
return -ENOMEM;
- mlxsw_emad_construct(skb, reg, payload, type, mlxsw_core);
- mlxsw_core->driver->txhdr_construct(skb, &tx_info);
+ err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
+ bulk_list, cb, cb_priv, tid);
+ if (err) {
+ kfree(trans);
+ return err;
+ }
+ return 0;
+}
- dev_dbg(mlxsw_core->bus_info->dev, "EMAD send (tid=%llx)\n",
- mlxsw_core->emad.tid);
- mlxsw_core_buf_dump_dbg(mlxsw_core, skb->data, skb->len);
+int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg, char *payload,
+ struct list_head *bulk_list,
+ mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
+{
+ return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
+ MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
+ bulk_list, cb, cb_priv);
+}
+EXPORT_SYMBOL(mlxsw_reg_trans_query);
- err = mlxsw_emad_transmit(mlxsw_core, skb, &tx_info);
- if (!err) {
- op_tlv = mlxsw_emad_op_tlv(mlxsw_core->emad.resp_skb);
- memcpy(payload, mlxsw_emad_reg_payload(op_tlv),
- reg->len);
+int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg, char *payload,
+ struct list_head *bulk_list,
+ mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
+{
+ return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
+ MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
+ bulk_list, cb, cb_priv);
+}
+EXPORT_SYMBOL(mlxsw_reg_trans_write);
- dev_dbg(mlxsw_core->bus_info->dev, "EMAD recv (tid=%llx)\n",
- mlxsw_core->emad.tid - 1);
- mlxsw_core_buf_dump_dbg(mlxsw_core,
- mlxsw_core->emad.resp_skb->data,
- mlxsw_core->emad.resp_skb->len);
+static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
+{
+ struct mlxsw_core *mlxsw_core = trans->core;
+ int err;
- dev_kfree_skb(mlxsw_core->emad.resp_skb);
- }
+ wait_for_completion(&trans->completion);
+ cancel_delayed_work_sync(&trans->timeout_dw);
+ err = trans->err;
+ if (trans->retries)
+ dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n",
+ trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid);
+ if (err)
+ dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n",
+ trans->tid, trans->reg->id,
+ mlxsw_reg_id_str(trans->reg->id),
+ mlxsw_core_reg_access_type_str(trans->type),
+ trans->emad_status,
+ mlxsw_emad_op_tlv_status_str(trans->emad_status));
+
+ list_del(&trans->bulk_list);
+ kfree_rcu(trans, rcu);
return err;
}
+int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list)
+{
+ struct mlxsw_reg_trans *trans;
+ struct mlxsw_reg_trans *tmp;
+ int sum_err = 0;
+ int err;
+
+ list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) {
+ err = mlxsw_reg_trans_wait(trans);
+ if (err && sum_err == 0)
+ sum_err = err; /* first error to be returned */
+ }
+ return sum_err;
+}
+EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait);
+
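/*
 * Usage sketch (hypothetical caller): queue two queries on one bulk list
 * and wait for both at once. 'reg', 'pl_a' and 'pl_b' are placeholders;
 * the callback matches the mlxsw_reg_trans_cb_t typedef added to core.h.
 */
static void example_copy_cb(struct mlxsw_core *mlxsw_core, char *payload,
			    size_t payload_len, unsigned long cb_priv)
{
	memcpy((char *) cb_priv, payload, payload_len);
}

static int example_bulk_query(struct mlxsw_core *core,
			      const struct mlxsw_reg_info *reg,
			      char *pl_a, char *pl_b)
{
	LIST_HEAD(bulk_list);
	int err, err2;

	err = mlxsw_reg_trans_query(core, reg, pl_a, &bulk_list,
				    example_copy_cb, (unsigned long) pl_a);
	if (!err)
		err = mlxsw_reg_trans_query(core, reg, pl_b, &bulk_list,
					    example_copy_cb,
					    (unsigned long) pl_b);
	/* Always wait: transactions already queued must be reaped. */
	err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
	return err ? err : err2;
}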
static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
const struct mlxsw_reg_info *reg,
char *payload,
enum mlxsw_core_reg_access_type type)
{
+ enum mlxsw_emad_op_tlv_status status;
int err, n_retry;
char *in_mbox, *out_mbox, *tmp;
+ dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n",
+ reg->id, mlxsw_reg_id_str(reg->id),
+ mlxsw_core_reg_access_type_str(type));
+
in_mbox = mlxsw_cmd_mbox_alloc();
if (!in_mbox)
return -ENOMEM;
@@ -1168,7 +1462,8 @@ static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
goto free_in_mbox;
}
- mlxsw_emad_pack_op_tlv(in_mbox, reg, type, mlxsw_core);
+ mlxsw_emad_pack_op_tlv(in_mbox, reg, type,
+ mlxsw_core_tid_get(mlxsw_core));
tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
mlxsw_emad_pack_reg_tlv(tmp, reg, payload);
@@ -1176,60 +1471,61 @@ static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
retry:
err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
if (!err) {
- err = mlxsw_emad_process_status(mlxsw_core, out_mbox);
- if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
- goto retry;
+ err = mlxsw_emad_process_status(out_mbox, &status);
+ if (err) {
+ if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
+ goto retry;
+ dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n",
+ status, mlxsw_emad_op_tlv_status_str(status));
+ }
}
if (!err)
memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
reg->len);
- mlxsw_core->emad.tid++;
mlxsw_cmd_mbox_free(out_mbox);
free_in_mbox:
mlxsw_cmd_mbox_free(in_mbox);
+ if (err)
+ dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n",
+ reg->id, mlxsw_reg_id_str(reg->id),
+ mlxsw_core_reg_access_type_str(type));
return err;
}
+static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core,
+ char *payload, size_t payload_len,
+ unsigned long cb_priv)
+{
+ char *orig_payload = (char *) cb_priv;
+
+ memcpy(orig_payload, payload, payload_len);
+}
+
static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
const struct mlxsw_reg_info *reg,
char *payload,
enum mlxsw_core_reg_access_type type)
{
- u64 cur_tid;
+ LIST_HEAD(bulk_list);
int err;
- if (mutex_lock_interruptible(&mlxsw_core->emad.lock)) {
- dev_err(mlxsw_core->bus_info->dev, "Reg access interrupted (reg_id=%x(%s),type=%s)\n",
- reg->id, mlxsw_reg_id_str(reg->id),
- mlxsw_core_reg_access_type_str(type));
- return -EINTR;
- }
-
- cur_tid = mlxsw_core->emad.tid;
- dev_dbg(mlxsw_core->bus_info->dev, "Reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
- cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
- mlxsw_core_reg_access_type_str(type));
-
/* During initialization EMAD interface is not available to us,
* so we default to command interface. We switch to EMAD interface
* after setting the appropriate traps.
*/
if (!mlxsw_core->emad.use_emad)
- err = mlxsw_core_reg_access_cmd(mlxsw_core, reg,
- payload, type);
- else
- err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
+ return mlxsw_core_reg_access_cmd(mlxsw_core, reg,
payload, type);
+ err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
+ payload, type, &bulk_list,
+ mlxsw_core_reg_access_cb,
+ (unsigned long) payload);
if (err)
- dev_err(mlxsw_core->bus_info->dev, "Reg access failed (tid=%llx,reg_id=%x(%s),type=%s)\n",
- cur_tid, reg->id, mlxsw_reg_id_str(reg->id),
- mlxsw_core_reg_access_type_str(type));
-
- mutex_unlock(&mlxsw_core->emad.lock);
- return err;
+ return err;
+ return mlxsw_reg_trans_bulk_wait(&bulk_list);
}
int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
@@ -1358,6 +1654,52 @@ void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);
+struct mlxsw_resources *mlxsw_core_resources_get(struct mlxsw_core *mlxsw_core)
+{
+ return &mlxsw_core->resources;
+}
+EXPORT_SYMBOL(mlxsw_core_resources_get);
+
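/*
 * Consumer sketch: a driver checking a queried resource before sizing a
 * feature. max_span is the only field struct mlxsw_resources carries at
 * this point.
 */
static int example_span_count(struct mlxsw_core *core)
{
	struct mlxsw_resources *res = mlxsw_core_resources_get(core);

	if (!res->max_span_valid)
		return -EIO;	/* FW did not report it; caller must cope */
	return res->max_span;
}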
+int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_core_port *mlxsw_core_port, u8 local_port,
+ struct net_device *dev, bool split, u32 split_group)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
+
+ if (split)
+ devlink_port_split_set(devlink_port, split_group);
+ devlink_port_type_eth_set(devlink_port, dev);
+ return devlink_port_register(devlink, devlink_port, local_port);
+}
+EXPORT_SYMBOL(mlxsw_core_port_init);
+
+void mlxsw_core_port_fini(struct mlxsw_core_port *mlxsw_core_port)
+{
+ struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
+
+ devlink_port_unregister(devlink_port);
+}
+EXPORT_SYMBOL(mlxsw_core_port_fini);
+
+static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
+ const char *buf, size_t size)
+{
+ __be32 *m = (__be32 *) buf;
+ int i;
+ int count = size / sizeof(__be32);
+
+ for (i = count - 1; i >= 0; i--)
+ if (m[i])
+ break;
+ i++;
+ count = i ? i : 1;
+ for (i = 0; i < count; i += 4)
+ dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
+ i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
+ be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
+}
+
int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
u32 in_mod, bool out_mbox_direct,
char *in_mbox, size_t in_mbox_size,
@@ -1400,17 +1742,35 @@ int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
}
EXPORT_SYMBOL(mlxsw_cmd_exec);
+int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay)
+{
+ return queue_delayed_work(mlxsw_wq, dwork, delay);
+}
+EXPORT_SYMBOL(mlxsw_core_schedule_dw);
+
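/*
 * Hypothetical user of the new helper: periodic work rescheduling itself
 * on the driver-wide mlxsw_wq (allocated with WQ_MEM_RECLAIM above). The
 * name and the one-second period are illustrative only.
 */
static struct delayed_work example_dw;

static void example_dw_func(struct work_struct *work)
{
	/* ... do periodic work ... */
	mlxsw_core_schedule_dw(&example_dw, msecs_to_jiffies(1000));
}

/* at init time:
 *	INIT_DELAYED_WORK(&example_dw, example_dw_func);
 *	mlxsw_core_schedule_dw(&example_dw, 0);
 */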
static int __init mlxsw_core_module_init(void)
{
- mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
- if (!mlxsw_core_dbg_root)
+ int err;
+
+ mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0);
+ if (!mlxsw_wq)
return -ENOMEM;
+ mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
+ if (!mlxsw_core_dbg_root) {
+ err = -ENOMEM;
+ goto err_debugfs_create_dir;
+ }
return 0;
+
+err_debugfs_create_dir:
+ destroy_workqueue(mlxsw_wq);
+ return err;
}
static void __exit mlxsw_core_module_exit(void)
{
debugfs_remove_recursive(mlxsw_core_dbg_root);
+ destroy_workqueue(mlxsw_wq);
}
module_init(mlxsw_core_module_init);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index c73d1c0792a6..d3476ead9982 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -43,6 +43,8 @@
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/skbuff.h>
+#include <linux/workqueue.h>
+#include <net/devlink.h>
#include "trap.h"
#include "reg.h"
@@ -61,6 +63,8 @@ struct mlxsw_driver;
struct mlxsw_bus;
struct mlxsw_bus_info;
+void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core);
+
int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver);
void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver);
@@ -74,10 +78,9 @@ struct mlxsw_tx_info {
bool is_emad;
};
-bool mlxsw_core_skb_transmit_busy(void *driver_priv,
+bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
const struct mlxsw_tx_info *tx_info);
-
-int mlxsw_core_skb_transmit(void *driver_priv, struct sk_buff *skb,
+int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
const struct mlxsw_tx_info *tx_info);
struct mlxsw_rx_listener {
@@ -106,6 +109,19 @@ void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
const struct mlxsw_event_listener *el,
void *priv);
+typedef void mlxsw_reg_trans_cb_t(struct mlxsw_core *mlxsw_core, char *payload,
+ size_t payload_len, unsigned long cb_priv);
+
+int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg, char *payload,
+ struct list_head *bulk_list,
+ mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv);
+int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
+ const struct mlxsw_reg_info *reg, char *payload,
+ struct list_head *bulk_list,
+ mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv);
+int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list);
+
int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
const struct mlxsw_reg_info *reg, char *payload);
int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
@@ -131,6 +147,26 @@ u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
u16 lag_id, u8 local_port);
+struct mlxsw_core_port {
+ struct devlink_port devlink_port;
+};
+
+static inline void *
+mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
+{
+	/* mlxsw_core_port is ensured to always be the first field in the
+	 * driver's port structure.
+ */
+ return mlxsw_core_port;
+}
+
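/*
 * Layout sketch: a hypothetical driver port structure honouring the
 * first-member contract the comment above relies on.
 */
struct example_port {
	struct mlxsw_core_port core_port;	/* must stay first */
	u8 local_port;
	/* ... driver-private state ... */
};

/* struct example_port *p = mlxsw_core_port_driver_priv(core_port); */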
+int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core,
+ struct mlxsw_core_port *mlxsw_core_port, u8 local_port,
+ struct net_device *dev, bool split, u32 split_group);
+void mlxsw_core_port_fini(struct mlxsw_core_port *mlxsw_core_port);
+
+int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay);
+
#define MLXSW_CONFIG_PROFILE_SWID_COUNT 8
struct mlxsw_swid_config {
@@ -154,7 +190,8 @@ struct mlxsw_config_profile {
used_max_ib_mc:1,
used_max_pkey:1,
used_ar_sec:1,
- used_adaptive_routing_group_cap:1;
+ used_adaptive_routing_group_cap:1,
+ used_kvd_sizes:1;
u8 max_vepa_channels;
u16 max_lag;
u16 max_port_per_lag;
@@ -175,6 +212,10 @@ struct mlxsw_config_profile {
u8 ar_sec;
u16 adaptive_routing_group_cap;
u8 arn;
+ u32 kvd_linear_size;
+ u32 kvd_hash_single_size;
+ u32 kvd_hash_double_size;
+ u8 resource_query_enable;
struct mlxsw_swid_config swid_config[MLXSW_CONFIG_PROFILE_SWID_COUNT];
};
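/*
 * Profile sketch: how a driver could opt into the new fields. The sizes
 * are placeholders (multiples of 128, per cmd.h), not values taken from
 * this patch.
 */
static const struct mlxsw_config_profile example_profile = {
	.used_kvd_sizes		= 1,
	.kvd_linear_size	= 64 * 1024,
	.kvd_hash_single_size	= 128 * 1024,
	.kvd_hash_double_size	= 32 * 1024,
	.resource_query_enable	= 1,
};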
@@ -183,21 +224,61 @@ struct mlxsw_driver {
const char *kind;
struct module *owner;
size_t priv_size;
- int (*init)(void *driver_priv, struct mlxsw_core *mlxsw_core,
+ int (*init)(struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *mlxsw_bus_info);
- void (*fini)(void *driver_priv);
- int (*port_split)(void *driver_priv, u8 local_port, unsigned int count);
- int (*port_unsplit)(void *driver_priv, u8 local_port);
+ void (*fini)(struct mlxsw_core *mlxsw_core);
+ int (*port_split)(struct mlxsw_core *mlxsw_core, u8 local_port,
+ unsigned int count);
+ int (*port_unsplit)(struct mlxsw_core *mlxsw_core, u8 local_port);
+ int (*sb_pool_get)(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index, u16 pool_index,
+ struct devlink_sb_pool_info *pool_info);
+ int (*sb_pool_set)(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index, u16 pool_index, u32 size,
+ enum devlink_sb_threshold_type threshold_type);
+ int (*sb_port_pool_get)(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_threshold);
+ int (*sb_port_pool_set)(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 threshold);
+ int (*sb_tc_pool_bind_get)(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 *p_pool_index, u32 *p_threshold);
+ int (*sb_tc_pool_bind_set)(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 pool_index, u32 threshold);
+ int (*sb_occ_snapshot)(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index);
+ int (*sb_occ_max_clear)(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index);
+ int (*sb_occ_port_pool_get)(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_cur, u32 *p_max);
+ int (*sb_occ_tc_port_bind_get)(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u32 *p_cur, u32 *p_max);
void (*txhdr_construct)(struct sk_buff *skb,
const struct mlxsw_tx_info *tx_info);
u8 txhdr_len;
const struct mlxsw_config_profile *profile;
};
+struct mlxsw_resources {
+ u8 max_span_valid:1;
+ u8 max_span;
+};
+
+struct mlxsw_resources *mlxsw_core_resources_get(struct mlxsw_core *mlxsw_core);
+
struct mlxsw_bus {
const char *kind;
int (*init)(void *bus_priv, struct mlxsw_core *mlxsw_core,
- const struct mlxsw_config_profile *profile);
+ const struct mlxsw_config_profile *profile,
+ struct mlxsw_resources *resources);
void (*fini)(void *bus_priv);
bool (*skb_transmit_busy)(void *bus_priv,
const struct mlxsw_tx_info *tx_info);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 7f4173c8eda3..1d1360c178bb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -1154,6 +1154,61 @@ mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask);
}
+#define MLXSW_RESOURCES_TABLE_END_ID 0xffff
+#define MLXSW_MAX_SPAN_ID 0x2420
+#define MLXSW_RESOURCES_QUERY_MAX_QUERIES 100
+#define MLXSW_RESOURCES_PER_QUERY 32
+
+static void mlxsw_pci_resources_query_parse(int id, u64 val,
+ struct mlxsw_resources *resources)
+{
+ switch (id) {
+ case MLXSW_MAX_SPAN_ID:
+ resources->max_span = val;
+ resources->max_span_valid = 1;
+ break;
+ default:
+ break;
+ }
+}
+
+static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox,
+ struct mlxsw_resources *resources,
+ u8 query_enabled)
+{
+ int index, i;
+ u64 data;
+ u16 id;
+ int err;
+
+	/* Not all firmware versions support the resources query */
+ if (!query_enabled)
+ return 0;
+
+ mlxsw_cmd_mbox_zero(mbox);
+
+ for (index = 0; index < MLXSW_RESOURCES_QUERY_MAX_QUERIES; index++) {
+ err = mlxsw_cmd_query_resources(mlxsw_pci->core, mbox, index);
+ if (err)
+ return err;
+
+ for (i = 0; i < MLXSW_RESOURCES_PER_QUERY; i++) {
+ id = mlxsw_cmd_mbox_query_resource_id_get(mbox, i);
+ data = mlxsw_cmd_mbox_query_resource_data_get(mbox, i);
+
+ if (id == MLXSW_RESOURCES_TABLE_END_ID)
+ return 0;
+
+ mlxsw_pci_resources_query_parse(id, data, resources);
+ }
+ }
+
+ /* If after MLXSW_RESOURCES_QUERY_MAX_QUERIES we still didn't get
+	 * MLXSW_RESOURCES_TABLE_END_ID, something went wrong in the FW.
+ */
+ return -EIO;
+}
+
static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
const struct mlxsw_config_profile *profile)
{
@@ -1255,6 +1310,20 @@ static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
mbox, profile->adaptive_routing_group_cap);
}
+ if (profile->used_kvd_sizes) {
+ mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_kvd_linear_size_set(
+ mbox, profile->kvd_linear_size);
+ mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set(
+ mbox, profile->kvd_hash_single_size);
+ mlxsw_cmd_mbox_config_profile_set_kvd_hash_double_size_set(
+ mbox, 1);
+ mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(
+ mbox, profile->kvd_hash_double_size);
+ }
for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
@@ -1390,7 +1459,8 @@ static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
}
static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
- const struct mlxsw_config_profile *profile)
+ const struct mlxsw_config_profile *profile,
+ struct mlxsw_resources *resources)
{
struct mlxsw_pci *mlxsw_pci = bus_priv;
struct pci_dev *pdev = mlxsw_pci->pdev;
@@ -1449,6 +1519,11 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
if (err)
goto err_boardinfo;
+ err = mlxsw_pci_resources_query(mlxsw_pci, mbox, resources,
+ profile->resource_query_enable);
+ if (err)
+ goto err_query_resources;
+
err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile);
if (err)
goto err_config_profile;
@@ -1471,6 +1546,7 @@ err_request_eq_irq:
mlxsw_pci_aqs_fini(mlxsw_pci);
err_aqs_init:
err_config_profile:
+err_query_resources:
err_boardinfo:
mlxsw_pci_fw_area_fini(mlxsw_pci);
err_fw_area_init:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
index f33b997f2b61..af371a82c35b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/port.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/port.h
@@ -56,6 +56,7 @@
#define MLXSW_PORT_PHY_BITS_MASK (MLXSW_PORT_MAX_PHY_PORTS - 1)
#define MLXSW_PORT_CPU_PORT 0x0
+#define MLXSW_PORT_ROUTER_PORT (MLXSW_PORT_MAX_PHY_PORTS + 2)
#define MLXSW_PORT_DONT_CARE (MLXSW_PORT_MAX_PORTS)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index ffe4c0305733..1721098eef13 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -1,9 +1,10 @@
/*
* drivers/net/ethernet/mellanox/mlxsw/reg.h
* Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2015-2016 Ido Schimmel <idosch@mellanox.com>
* Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
- * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2015-2016 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -386,7 +387,9 @@ enum mlxsw_reg_sfd_rec_action {
/* forward and trap, trap_id is FDB_TRAP */
MLXSW_REG_SFD_REC_ACTION_MIRROR_TO_CPU = 1,
/* trap and do not forward, trap_id is FDB_TRAP */
- MLXSW_REG_SFD_REC_ACTION_TRAP = 3,
+ MLXSW_REG_SFD_REC_ACTION_TRAP = 2,
+ /* forward to IP router */
+ MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER = 3,
MLXSW_REG_SFD_REC_ACTION_DISCARD_ERROR = 15,
};
@@ -1805,6 +1808,184 @@ static inline void mlxsw_reg_spvmlr_pack(char *payload, u8 local_port,
}
}
+/* QTCT - QoS Switch Traffic Class Table
+ * -------------------------------------
+ * Configures the mapping between the packet switch priority and the
+ * traffic class on the transmit port.
+ */
+#define MLXSW_REG_QTCT_ID 0x400A
+#define MLXSW_REG_QTCT_LEN 0x08
+
+static const struct mlxsw_reg_info mlxsw_reg_qtct = {
+ .id = MLXSW_REG_QTCT_ID,
+ .len = MLXSW_REG_QTCT_LEN,
+};
+
+/* reg_qtct_local_port
+ * Local port number.
+ * Access: Index
+ *
+ * Note: CPU port is not supported.
+ */
+MLXSW_ITEM32(reg, qtct, local_port, 0x00, 16, 8);
+
+/* reg_qtct_sub_port
+ * Virtual port within the physical port.
+ * Should be set to 0 when virtual ports are not enabled on the port.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qtct, sub_port, 0x00, 8, 8);
+
+/* reg_qtct_switch_prio
+ * Switch priority.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qtct, switch_prio, 0x00, 0, 4);
+
+/* reg_qtct_tclass
+ * Traffic class.
+ * Default values:
+ * switch_prio 0 : tclass 1
+ * switch_prio 1 : tclass 0
+ * switch_prio i : tclass i, for i > 1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qtct, tclass, 0x04, 0, 4);
+
+static inline void mlxsw_reg_qtct_pack(char *payload, u8 local_port,
+ u8 switch_prio, u8 tclass)
+{
+ MLXSW_REG_ZERO(qtct, payload);
+ mlxsw_reg_qtct_local_port_set(payload, local_port);
+ mlxsw_reg_qtct_switch_prio_set(payload, switch_prio);
+ mlxsw_reg_qtct_tclass_set(payload, tclass);
+}
+
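/*
 * Usage sketch: remap switch priority 3 to traffic class 3 on a port,
 * using the existing mlxsw_reg_write()/MLXSW_REG() helpers.
 */
static int example_qtct_set(struct mlxsw_core *core, u8 local_port)
{
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, local_port, 3, 3);
	return mlxsw_reg_write(core, MLXSW_REG(qtct), qtct_pl);
}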
+/* QEEC - QoS ETS Element Configuration Register
+ * ---------------------------------------------
+ * Configures the ETS elements.
+ */
+#define MLXSW_REG_QEEC_ID 0x400D
+#define MLXSW_REG_QEEC_LEN 0x1C
+
+static const struct mlxsw_reg_info mlxsw_reg_qeec = {
+ .id = MLXSW_REG_QEEC_ID,
+ .len = MLXSW_REG_QEEC_LEN,
+};
+
+/* reg_qeec_local_port
+ * Local port number.
+ * Access: Index
+ *
+ * Note: CPU port is supported.
+ */
+MLXSW_ITEM32(reg, qeec, local_port, 0x00, 16, 8);
+
+enum mlxsw_reg_qeec_hr {
+ MLXSW_REG_QEEC_HIERARCY_PORT,
+ MLXSW_REG_QEEC_HIERARCY_GROUP,
+ MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
+ MLXSW_REG_QEEC_HIERARCY_TC,
+};
+
+/* reg_qeec_element_hierarchy
+ * 0 - Port
+ * 1 - Group
+ * 2 - Subgroup
+ * 3 - Traffic Class
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qeec, element_hierarchy, 0x04, 16, 4);
+
+/* reg_qeec_element_index
+ * The index of the element in the hierarchy.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, qeec, element_index, 0x04, 0, 8);
+
+/* reg_qeec_next_element_index
+ * The index of the next (lower) element in the hierarchy.
+ * Access: RW
+ *
+ * Note: Reserved for element_hierarchy 0.
+ */
+MLXSW_ITEM32(reg, qeec, next_element_index, 0x08, 0, 8);
+
+enum {
+ MLXSW_REG_QEEC_BYTES_MODE,
+ MLXSW_REG_QEEC_PACKETS_MODE,
+};
+
+/* reg_qeec_pb
+ * Packets or bytes mode.
+ * 0 - Bytes mode
+ * 1 - Packets mode
+ * Access: RW
+ *
+ * Note: Used for max shaper configuration. For Spectrum, packets mode
+ * is supported only for traffic classes of the CPU port.
+ */
+MLXSW_ITEM32(reg, qeec, pb, 0x0C, 28, 1);
+
+/* reg_qeec_mase
+ * Max shaper configuration enable. Enables configuration of the max
+ * shaper on this ETS element.
+ * 0 - Disable
+ * 1 - Enable
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, mase, 0x10, 31, 1);
+
+/* A large max rate will disable the max shaper. */
+#define MLXSW_REG_QEEC_MAS_DIS 200000000 /* Kbps */
+
+/* reg_qeec_max_shaper_rate
+ * Max shaper information rate.
+ * For CPU port, can only be configured for port hierarchy.
+ * When in bytes mode, value is specified in units of 1000bps.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, max_shaper_rate, 0x10, 0, 28);
+
+/* reg_qeec_de
+ * DWRR configuration enable. Enables configuration of the dwrr and
+ * dwrr_weight.
+ * 0 - Disable
+ * 1 - Enable
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, de, 0x18, 31, 1);
+
+/* reg_qeec_dwrr
+ * Transmission selection algorithm to use on the link going down from
+ * the ETS element.
+ * 0 - Strict priority
+ * 1 - DWRR
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, dwrr, 0x18, 15, 1);
+
+/* reg_qeec_dwrr_weight
+ * DWRR weight on the link going down from the ETS element. The
+ * percentage of bandwidth guaranteed to an ETS element within
+ * its hierarchy. The sum of all weights across all ETS elements
+ * within one hierarchy should be equal to 100. Reserved when
+ * transmission selection algorithm is strict priority.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, qeec, dwrr_weight, 0x18, 0, 8);
+
+static inline void mlxsw_reg_qeec_pack(char *payload, u8 local_port,
+ enum mlxsw_reg_qeec_hr hr, u8 index,
+ u8 next_index)
+{
+ MLXSW_REG_ZERO(qeec, payload);
+ mlxsw_reg_qeec_local_port_set(payload, local_port);
+ mlxsw_reg_qeec_element_hierarchy_set(payload, hr);
+ mlxsw_reg_qeec_element_index_set(payload, index);
+ mlxsw_reg_qeec_next_element_index_set(payload, next_index);
+}
+
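/*
 * Usage sketch: cap a traffic class's egress rate with the max shaper,
 * using the existing mlxsw_reg_write()/MLXSW_REG() helpers. rate_kbps
 * must stay below MLXSW_REG_QEEC_MAS_DIS, which disables the shaper.
 */
static int example_tc_maxrate_set(struct mlxsw_core *core, u8 local_port,
				  u8 tclass, u32 rate_kbps)
{
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, local_port, MLXSW_REG_QEEC_HIERARCY_TC,
			    tclass, tclass);
	mlxsw_reg_qeec_mase_set(qeec_pl, 1);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, rate_kbps);
	return mlxsw_reg_write(core, MLXSW_REG(qeec), qeec_pl);
}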
/* PMLP - Ports Module to Local Port Register
* ------------------------------------------
* Configures the assignment of modules to local ports.
@@ -2141,6 +2322,145 @@ static inline void mlxsw_reg_paos_pack(char *payload, u8 local_port,
mlxsw_reg_paos_e_set(payload, 1);
}
+/* PFCC - Ports Flow Control Configuration Register
+ * ------------------------------------------------
+ * Configures and retrieves the per port flow control configuration.
+ */
+#define MLXSW_REG_PFCC_ID 0x5007
+#define MLXSW_REG_PFCC_LEN 0x20
+
+static const struct mlxsw_reg_info mlxsw_reg_pfcc = {
+ .id = MLXSW_REG_PFCC_ID,
+ .len = MLXSW_REG_PFCC_LEN,
+};
+
+/* reg_pfcc_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pfcc, local_port, 0x00, 16, 8);
+
+/* reg_pfcc_pnat
+ * Port number access type. Determines the way local_port is interpreted:
+ * 0 - Local port number.
+ * 1 - IB / label port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pfcc, pnat, 0x00, 14, 2);
+
+/* reg_pfcc_shl_cap
+ * Send to higher layers capabilities:
+ * 0 - No capability of sending Pause and PFC frames to higher layers.
+ * 1 - Device has capability of sending Pause and PFC frames to higher
+ * layers.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pfcc, shl_cap, 0x00, 1, 1);
+
+/* reg_pfcc_shl_opr
+ * Send to higher layers operation:
+ * 0 - Pause and PFC frames are handled by the port (default).
+ * 1 - Pause and PFC frames are handled by the port and also sent to
+ * higher layers. Only valid if shl_cap = 1.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pfcc, shl_opr, 0x00, 0, 1);
+
+/* reg_pfcc_ppan
+ * Pause policy auto negotiation.
+ * 0 - Disabled. Generate / ignore Pause frames based on pptx / pprtx.
+ * 1 - Enabled. When auto-negotiation is performed, set the Pause policy
+ * based on the auto-negotiation resolution.
+ * Access: RW
+ *
+ * Note: The auto-negotiation advertisement is set according to pptx and
+ * pprx. When PFC is set on Tx / Rx, ppan must be set to 0.
+ */
+MLXSW_ITEM32(reg, pfcc, ppan, 0x04, 28, 4);
+
+/* reg_pfcc_prio_mask_tx
+ * Bit per priority indicating if Tx flow control policy should be
+ * updated based on bit pfctx.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, pfcc, prio_mask_tx, 0x04, 16, 8);
+
+/* reg_pfcc_prio_mask_rx
+ * Bit per priority indicating if Rx flow control policy should be
+ * updated based on bit pfcrx.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, pfcc, prio_mask_rx, 0x04, 0, 8);
+
+/* reg_pfcc_pptx
+ * Admin Pause policy on Tx.
+ * 0 - Never generate Pause frames (default).
+ * 1 - Generate Pause frames according to Rx buffer threshold.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pfcc, pptx, 0x08, 31, 1);
+
+/* reg_pfcc_aptx
+ * Active (operational) Pause policy on Tx.
+ * 0 - Never generate Pause frames.
+ * 1 - Generate Pause frames according to Rx buffer threshold.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pfcc, aptx, 0x08, 30, 1);
+
+/* reg_pfcc_pfctx
+ * Priority based flow control policy on Tx[7:0]. Per-priority bit mask:
+ * 0 - Never generate priority Pause frames on the specified priority
+ * (default).
+ * 1 - Generate priority Pause frames according to Rx buffer threshold on
+ * the specified priority.
+ * Access: RW
+ *
+ * Note: pfctx and pptx must be mutually exclusive.
+ */
+MLXSW_ITEM32(reg, pfcc, pfctx, 0x08, 16, 8);
+
+/* reg_pfcc_pprx
+ * Admin Pause policy on Rx.
+ * 0 - Ignore received Pause frames (default).
+ * 1 - Respect received Pause frames.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pfcc, pprx, 0x0C, 31, 1);
+
+/* reg_pfcc_aprx
+ * Active (operational) Pause policy on Rx.
+ * 0 - Ignore received Pause frames.
+ * 1 - Respect received Pause frames.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pfcc, aprx, 0x0C, 30, 1);
+
+/* reg_pfcc_pfcrx
+ * Priority based flow control policy on Rx[7:0]. Per-priority bit mask:
+ * 0 - Ignore incoming priority Pause frames on the specified priority
+ * (default).
+ * 1 - Respect incoming priority Pause frames on the specified priority.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pfcc, pfcrx, 0x0C, 16, 8);
+
+#define MLXSW_REG_PFCC_ALL_PRIO 0xFF
+
+static inline void mlxsw_reg_pfcc_prio_pack(char *payload, u8 pfc_en)
+{
+ mlxsw_reg_pfcc_prio_mask_tx_set(payload, MLXSW_REG_PFCC_ALL_PRIO);
+ mlxsw_reg_pfcc_prio_mask_rx_set(payload, MLXSW_REG_PFCC_ALL_PRIO);
+ mlxsw_reg_pfcc_pfctx_set(payload, pfc_en);
+ mlxsw_reg_pfcc_pfcrx_set(payload, pfc_en);
+}
+
+static inline void mlxsw_reg_pfcc_pack(char *payload, u8 local_port)
+{
+ MLXSW_REG_ZERO(pfcc, payload);
+ mlxsw_reg_pfcc_local_port_set(payload, local_port);
+}
+
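A minimal usage sketch (not part of the patch), assuming the mlxsw core's
MLXSW_REG() macro and mlxsw_reg_write() helper; the function name is
illustrative. Since pfctx and pptx are mutually exclusive, the global
pause bits are left cleared here:

	static int example_pfc_enable(struct mlxsw_core *core, u8 local_port,
				      u8 pfc_en)
	{
		char pfcc_pl[MLXSW_REG_PFCC_LEN];

		mlxsw_reg_pfcc_pack(pfcc_pl, local_port);
		mlxsw_reg_pfcc_prio_pack(pfcc_pl, pfc_en);	/* e.g. BIT(3) */
		return mlxsw_reg_write(core, MLXSW_REG(pfcc), pfcc_pl);
	}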
/* PPCNT - Ports Performance Counters Register
* -------------------------------------------
* The PPCNT register retrieves per port performance counters.
@@ -2180,6 +2500,12 @@ MLXSW_ITEM32(reg, ppcnt, local_port, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2);
+enum mlxsw_reg_ppcnt_grp {
+ MLXSW_REG_PPCNT_IEEE_8023_CNT = 0x0,
+ MLXSW_REG_PPCNT_PRIO_CNT = 0x10,
+ MLXSW_REG_PPCNT_TC_CNT = 0x11,
+};
+
/* reg_ppcnt_grp
* Performance counter group.
* Group 63 indicates all groups. Only valid on Set() operation with
@@ -2215,6 +2541,8 @@ MLXSW_ITEM32(reg, ppcnt, clr, 0x04, 31, 1);
*/
MLXSW_ITEM32(reg, ppcnt, prio_tc, 0x04, 0, 5);
+/* Ethernet IEEE 802.3 Counter Group */
+
/* reg_ppcnt_a_frames_transmitted_ok
* Access: RO
*/
@@ -2329,15 +2657,177 @@ MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_received,
MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted,
0x08 + 0x90, 0, 64);
-static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port)
+/* Ethernet Per Priority Group Counters */
+
+/* reg_ppcnt_rx_octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, rx_octets, 0x08 + 0x00, 0, 64);
+
+/* reg_ppcnt_rx_frames
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, rx_frames, 0x08 + 0x20, 0, 64);
+
+/* reg_ppcnt_tx_octets
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, tx_octets, 0x08 + 0x28, 0, 64);
+
+/* reg_ppcnt_tx_frames
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, tx_frames, 0x08 + 0x48, 0, 64);
+
+/* reg_ppcnt_rx_pause
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, rx_pause, 0x08 + 0x50, 0, 64);
+
+/* reg_ppcnt_rx_pause_duration
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, rx_pause_duration, 0x08 + 0x58, 0, 64);
+
+/* reg_ppcnt_tx_pause
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, tx_pause, 0x08 + 0x60, 0, 64);
+
+/* reg_ppcnt_tx_pause_duration
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, tx_pause_duration, 0x08 + 0x68, 0, 64);
+
+/* reg_ppcnt_rx_pause_transition
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, rx_pause_transition, 0x08 + 0x70, 0, 64);
+
+/* Ethernet Per Traffic Group Counters */
+
+/* reg_ppcnt_tc_transmit_queue
+ * Contains the transmit queue depth in cells of traffic class
+ * selected by prio_tc and the port selected by local_port.
+ * The field cannot be cleared.
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, tc_transmit_queue, 0x08 + 0x00, 0, 64);
+
+/* reg_ppcnt_tc_no_buffer_discard_uc
+ * The number of unicast packets dropped due to lack of shared
+ * buffer resources.
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, tc_no_buffer_discard_uc, 0x08 + 0x08, 0, 64);
+
+static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port,
+ enum mlxsw_reg_ppcnt_grp grp,
+ u8 prio_tc)
{
MLXSW_REG_ZERO(ppcnt, payload);
mlxsw_reg_ppcnt_swid_set(payload, 0);
mlxsw_reg_ppcnt_local_port_set(payload, local_port);
mlxsw_reg_ppcnt_pnat_set(payload, 0);
- mlxsw_reg_ppcnt_grp_set(payload, 0);
+ mlxsw_reg_ppcnt_grp_set(payload, grp);
mlxsw_reg_ppcnt_clr_set(payload, 0);
- mlxsw_reg_ppcnt_prio_tc_set(payload, 0);
+ mlxsw_reg_ppcnt_prio_tc_set(payload, prio_tc);
+}
+
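With the new signature, reading the per-priority group looks like the
following sketch (not part of the patch), assuming the mlxsw core's
MLXSW_REG() macro, the mlxsw_reg_query() helper and the
MLXSW_REG_PPCNT_LEN define from earlier in the file; the function name is
illustrative:

	static int example_prio_stats_get(struct mlxsw_core *core,
					  u8 local_port, u8 prio,
					  u64 *p_rx_pause)
	{
		char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
		int err;

		mlxsw_reg_ppcnt_pack(ppcnt_pl, local_port,
				     MLXSW_REG_PPCNT_PRIO_CNT, prio);
		err = mlxsw_reg_query(core, MLXSW_REG(ppcnt), ppcnt_pl);
		if (err)
			return err;
		*p_rx_pause = mlxsw_reg_ppcnt_rx_pause_get(ppcnt_pl);
		return 0;
	}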
+/* PPTB - Port Prio To Buffer Register
+ * -----------------------------------
+ * Configures the switch priority to buffer table.
+ */
+#define MLXSW_REG_PPTB_ID 0x500B
+#define MLXSW_REG_PPTB_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_pptb = {
+ .id = MLXSW_REG_PPTB_ID,
+ .len = MLXSW_REG_PPTB_LEN,
+};
+
+enum {
+ MLXSW_REG_PPTB_MM_UM,
+ MLXSW_REG_PPTB_MM_UNICAST,
+ MLXSW_REG_PPTB_MM_MULTICAST,
+};
+
+/* reg_pptb_mm
+ * Mapping mode.
+ * 0 - Map both unicast and multicast packets to the same buffer.
+ * 1 - Map only unicast packets.
+ * 2 - Map only multicast packets.
+ * Access: Index
+ *
+ * Note: SwitchX-2 only supports the first option.
+ */
+MLXSW_ITEM32(reg, pptb, mm, 0x00, 28, 2);
+
+/* reg_pptb_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pptb, local_port, 0x00, 16, 8);
+
+/* reg_pptb_um
+ * Enables the update of the untagged_buf field.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pptb, um, 0x00, 8, 1);
+
+/* reg_pptb_pm
+ * Enables the update of the prio_to_buff field.
+ * Bit <i> is a flag for updating the mapping for switch priority <i>.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pptb, pm, 0x00, 0, 8);
+
+/* reg_pptb_prio_to_buff
+ * Mapping of switch priority <i> to one of the allocated receive port
+ * buffers.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, pptb, prio_to_buff, 0x04, 0x04, 4);
+
+/* reg_pptb_pm_msb
+ * Enables the update of the prio_to_buff field.
+ * Bit <i> is a flag for updating the mapping for switch priority <i+8>.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pptb, pm_msb, 0x08, 24, 8);
+
+/* reg_pptb_untagged_buff
+ * Mapping of untagged frames to one of the allocated receive port buffers.
+ * Access: RW
+ *
+ * Note: In SwitchX-2 this field must be mapped to buffer 8. Reserved for
+ * Spectrum, as it maps untagged packets based on the default switch priority.
+ */
+MLXSW_ITEM32(reg, pptb, untagged_buff, 0x08, 0, 4);
+
+/* reg_pptb_prio_to_buff_msb
+ * Mapping of switch priority <i+8> to one of the allocated receive port
+ * buffers.
+ * Access: RW
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, pptb, prio_to_buff_msb, 0x0C, 0x04, 4);
+
+#define MLXSW_REG_PPTB_ALL_PRIO 0xFF
+
+static inline void mlxsw_reg_pptb_pack(char *payload, u8 local_port)
+{
+ MLXSW_REG_ZERO(pptb, payload);
+ mlxsw_reg_pptb_mm_set(payload, MLXSW_REG_PPTB_MM_UM);
+ mlxsw_reg_pptb_local_port_set(payload, local_port);
+ mlxsw_reg_pptb_pm_set(payload, MLXSW_REG_PPTB_ALL_PRIO);
+ mlxsw_reg_pptb_pm_msb_set(payload, MLXSW_REG_PPTB_ALL_PRIO);
+}
+
+static inline void mlxsw_reg_pptb_prio_to_buff_pack(char *payload, u8 prio,
+ u8 buff)
+{
+ mlxsw_reg_pptb_prio_to_buff_set(payload, prio, buff);
+ mlxsw_reg_pptb_prio_to_buff_msb_set(payload, prio, buff);
}
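Note that mlxsw_reg_pptb_pack() already arms the pm/pm_msb update masks
for all priorities, so any priority not explicitly mapped is written as
buffer 0. A minimal sketch (not part of the patch), assuming the mlxsw
core's MLXSW_REG() macro and mlxsw_reg_write() helper; the function name
is illustrative:

	static int example_prio_to_buff_set(struct mlxsw_core *core,
					    u8 local_port, u8 prio, u8 buff)
	{
		char pptb_pl[MLXSW_REG_PPTB_LEN];

		mlxsw_reg_pptb_pack(pptb_pl, local_port);
		mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, prio, buff);
		return mlxsw_reg_write(core, MLXSW_REG(pptb), pptb_pl);
	}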
/* PBMC - Port Buffer Management Control Register
@@ -2346,7 +2836,7 @@ static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port)
* allocation for different Prios, and the Pause threshold management.
*/
#define MLXSW_REG_PBMC_ID 0x500C
-#define MLXSW_REG_PBMC_LEN 0x68
+#define MLXSW_REG_PBMC_LEN 0x6C
static const struct mlxsw_reg_info mlxsw_reg_pbmc = {
.id = MLXSW_REG_PBMC_ID,
@@ -2374,6 +2864,8 @@ MLXSW_ITEM32(reg, pbmc, xoff_timer_value, 0x04, 16, 16);
*/
MLXSW_ITEM32(reg, pbmc, xoff_refresh, 0x04, 0, 16);
+#define MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX 11
+
/* reg_pbmc_buf_lossy
* The field indicates if the buffer is lossy.
* 0 - Lossless
@@ -2398,6 +2890,30 @@ MLXSW_ITEM32_INDEXED(reg, pbmc, buf_epsb, 0x0C, 24, 1, 0x08, 0x00, false);
*/
MLXSW_ITEM32_INDEXED(reg, pbmc, buf_size, 0x0C, 0, 16, 0x08, 0x00, false);
+/* reg_pbmc_buf_xoff_threshold
+ * Once the amount of data in the buffer goes above this value, the
+ * device starts sending PFC frames for all priorities associated with
+ * the buffer. Units are cells. Reserved in case of a lossy buffer.
+ * Access: RW
+ *
+ * Note: In Spectrum, reserved for buffer[9].
+ */
+MLXSW_ITEM32_INDEXED(reg, pbmc, buf_xoff_threshold, 0x0C, 16, 16,
+ 0x08, 0x04, false);
+
+/* reg_pbmc_buf_xon_threshold
+ * When the amount of data in the buffer goes below this value, the
+ * device stops sending PFC frames for the priorities associated with
+ * the buffer. Units are cells. Reserved in case of a lossy buffer.
+ * Access: RW
+ *
+ * Note: In Spectrum, reserved for buffer[9].
+ */
+MLXSW_ITEM32_INDEXED(reg, pbmc, buf_xon_threshold, 0x0C, 0, 16,
+ 0x08, 0x04, false);
+
static inline void mlxsw_reg_pbmc_pack(char *payload, u8 local_port,
u16 xoff_timer_value, u16 xoff_refresh)
{
@@ -2416,6 +2932,17 @@ static inline void mlxsw_reg_pbmc_lossy_buffer_pack(char *payload,
mlxsw_reg_pbmc_buf_size_set(payload, buf_index, size);
}
+static inline void mlxsw_reg_pbmc_lossless_buffer_pack(char *payload,
+ int buf_index, u16 size,
+ u16 threshold)
+{
+ mlxsw_reg_pbmc_buf_lossy_set(payload, buf_index, 0);
+ mlxsw_reg_pbmc_buf_epsb_set(payload, buf_index, 0);
+ mlxsw_reg_pbmc_buf_size_set(payload, buf_index, size);
+ mlxsw_reg_pbmc_buf_xoff_threshold_set(payload, buf_index, threshold);
+ mlxsw_reg_pbmc_buf_xon_threshold_set(payload, buf_index, threshold);
+}
+
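Because PBMC carries the whole per-port buffer array, the usual pattern
is read-modify-write so untouched buffers keep their configuration. A
sketch (not part of the patch), assuming the mlxsw core's MLXSW_REG()
macro and the mlxsw_reg_query()/mlxsw_reg_write() helpers; the function
name is illustrative:

	static int example_lossless_buffer_set(struct mlxsw_core *core,
					       u8 local_port, int buf_index,
					       u16 size_cells, u16 thres_cells)
	{
		char pbmc_pl[MLXSW_REG_PBMC_LEN];
		int err;

		mlxsw_reg_pbmc_pack(pbmc_pl, local_port, 0, 0);
		err = mlxsw_reg_query(core, MLXSW_REG(pbmc), pbmc_pl);
		if (err)
			return err;
		mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, buf_index,
						    size_cells, thres_cells);
		return mlxsw_reg_write(core, MLXSW_REG(pbmc), pbmc_pl);
	}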
/* PSPA - Port Switch Partition Allocation
* ---------------------------------------
* Controls the association of a port with a switch partition and enables
@@ -2695,6 +3222,1194 @@ static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action, u16 trap_id)
mlxsw_reg_hpkt_ctrl_set(payload, MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT);
}
+/* RGCR - Router General Configuration Register
+ * --------------------------------------------
+ * The register is used for setting up the router configuration.
+ */
+#define MLXSW_REG_RGCR_ID 0x8001
+#define MLXSW_REG_RGCR_LEN 0x28
+
+static const struct mlxsw_reg_info mlxsw_reg_rgcr = {
+ .id = MLXSW_REG_RGCR_ID,
+ .len = MLXSW_REG_RGCR_LEN,
+};
+
+/* reg_rgcr_ipv4_en
+ * IPv4 router enable.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rgcr, ipv4_en, 0x00, 31, 1);
+
+/* reg_rgcr_ipv6_en
+ * IPv6 router enable.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rgcr, ipv6_en, 0x00, 30, 1);
+
+/* reg_rgcr_max_router_interfaces
+ * Defines the maximum number of active router interfaces for all virtual
+ * routers.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rgcr, max_router_interfaces, 0x10, 0, 16);
+
+/* reg_rgcr_usp
+ * Update switch priority and packet color.
+ * 0 - Preserve the value of Switch Priority and packet color.
+ * 1 - Recalculate the value of Switch Priority and packet color.
+ * Access: RW
+ *
+ * Note: Not supported by SwitchX and SwitchX-2.
+ */
+MLXSW_ITEM32(reg, rgcr, usp, 0x18, 20, 1);
+
+/* reg_rgcr_pcp_rw
+ * Indicates how to handle the pcp_rewrite_en value:
+ * 0 - Preserve the value of pcp_rewrite_en.
+ * 2 - Disable PCP rewrite.
+ * 3 - Enable PCP rewrite.
+ * Access: RW
+ *
+ * Note: Not supported by SwitchX and SwitchX-2.
+ */
+MLXSW_ITEM32(reg, rgcr, pcp_rw, 0x18, 16, 2);
+
+/* reg_rgcr_activity_dis
+ * Activity disable:
+ * 0 - Activity will be set when an entry is hit (default).
+ * 1 - Activity will not be set when an entry is hit.
+ *
+ * Bit 0 - Disable activity bit in Router Algorithmic LPM Unicast Entry
+ * (RALUE).
+ * Bit 1 - Disable activity bit in Router Algorithmic LPM Unicast Host
+ * Entry (RAUHT).
+ * Bits 2:7 are reserved.
+ * Access: RW
+ *
+ * Note: Not supported by SwitchX, SwitchX-2 and Switch-IB.
+ */
+MLXSW_ITEM32(reg, rgcr, activity_dis, 0x20, 0, 8);
+
+static inline void mlxsw_reg_rgcr_pack(char *payload, bool ipv4_en)
+{
+ MLXSW_REG_ZERO(rgcr, payload);
+ mlxsw_reg_rgcr_ipv4_en_set(payload, ipv4_en);
+}
+
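A minimal sketch of router bring-up (not part of the patch), assuming the
mlxsw core's MLXSW_REG() macro and mlxsw_reg_write() helper; the function
name is illustrative:

	static int example_router_init(struct mlxsw_core *core, u16 max_rifs)
	{
		char rgcr_pl[MLXSW_REG_RGCR_LEN];

		mlxsw_reg_rgcr_pack(rgcr_pl, true);	/* ipv4_en */
		mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs);
		return mlxsw_reg_write(core, MLXSW_REG(rgcr), rgcr_pl);
	}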
+/* RITR - Router Interface Table Register
+ * --------------------------------------
+ * The register is used to configure the router interface table.
+ */
+#define MLXSW_REG_RITR_ID 0x8002
+#define MLXSW_REG_RITR_LEN 0x40
+
+static const struct mlxsw_reg_info mlxsw_reg_ritr = {
+ .id = MLXSW_REG_RITR_ID,
+ .len = MLXSW_REG_RITR_LEN,
+};
+
+/* reg_ritr_enable
+ * Enables routing on the router interface.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, enable, 0x00, 31, 1);
+
+/* reg_ritr_ipv4
+ * IPv4 routing enable. Enables routing of IPv4 traffic on the router
+ * interface.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, ipv4, 0x00, 29, 1);
+
+/* reg_ritr_ipv6
+ * IPv6 routing enable. Enables routing of IPv6 traffic on the router
+ * interface.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, ipv6, 0x00, 28, 1);
+
+enum mlxsw_reg_ritr_if_type {
+ MLXSW_REG_RITR_VLAN_IF,
+ MLXSW_REG_RITR_FID_IF,
+ MLXSW_REG_RITR_SP_IF,
+};
+
+/* reg_ritr_type
+ * Router interface type.
+ * 0 - VLAN interface.
+ * 1 - FID interface.
+ * 2 - Sub-port interface.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, type, 0x00, 23, 3);
+
+enum {
+ MLXSW_REG_RITR_RIF_CREATE,
+ MLXSW_REG_RITR_RIF_DEL,
+};
+
+/* reg_ritr_op
+ * Opcode:
+ * 0 - Create or edit RIF.
+ * 1 - Delete RIF.
+ * Reserved for SwitchX-2. For Spectrum, editing of interface properties
+ * is not supported. An interface must be deleted and re-created in order
+ * to update properties.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, ritr, op, 0x00, 20, 2);
+
+/* reg_ritr_rif
+ * Router interface index. A pointer to the Router Interface Table.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ritr, rif, 0x00, 0, 16);
+
+/* reg_ritr_ipv4_fe
+ * IPv4 Forwarding Enable.
+ * Enables routing of IPv4 traffic on the router interface. When disabled,
+ * forwarding is blocked but local traffic (traps and IP2ME) will be enabled.
+ * Not supported in SwitchX-2.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, ipv4_fe, 0x04, 29, 1);
+
+/* reg_ritr_ipv6_fe
+ * IPv6 Forwarding Enable.
+ * Enables routing of IPv6 traffic on the router interface. When disabled,
+ * forwarding is blocked but local traffic (traps and IP2ME) will be enabled.
+ * Not supported in SwitchX-2.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, ipv6_fe, 0x04, 28, 1);
+
+/* reg_ritr_lb_en
+ * Loop-back filter enable for unicast packets.
+ * If the flag is set then loop-back filter for unicast packets is
+ * implemented on the RIF. Multicast packets are always subject to
+ * loop-back filtering.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, lb_en, 0x04, 24, 1);
+
+/* reg_ritr_virtual_router
+ * Virtual router ID associated with the router interface.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, virtual_router, 0x04, 0, 16);
+
+/* reg_ritr_mtu
+ * Router interface MTU.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, mtu, 0x34, 0, 16);
+
+/* reg_ritr_if_swid
+ * Switch partition ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, if_swid, 0x08, 24, 8);
+
+/* reg_ritr_if_mac
+ * Router interface MAC address.
+ * In Spectrum, all MAC addresses must have the same 38 MSBits.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, ritr, if_mac, 0x12, 6);
+
+/* VLAN Interface */
+
+/* reg_ritr_vlan_if_vid
+ * VLAN ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, vlan_if_vid, 0x08, 0, 12);
+
+/* FID Interface */
+
+/* reg_ritr_fid_if_fid
+ * Filtering ID. Used to connect a bridge to the router. Only FIDs from
+ * the vFID range are supported.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, fid_if_fid, 0x08, 0, 16);
+
+static inline void mlxsw_reg_ritr_fid_set(char *payload,
+ enum mlxsw_reg_ritr_if_type rif_type,
+ u16 fid)
+{
+ if (rif_type == MLXSW_REG_RITR_FID_IF)
+ mlxsw_reg_ritr_fid_if_fid_set(payload, fid);
+ else
+ mlxsw_reg_ritr_vlan_if_vid_set(payload, fid);
+}
+
+/* Sub-port Interface */
+
+/* reg_ritr_sp_if_lag
+ * LAG indication. When this bit is set the system_port field holds the
+ * LAG identifier.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, sp_if_lag, 0x08, 24, 1);
+
+/* reg_ritr_sp_if_system_port
+ * Unique port identifier. When the lag bit is set, this field holds
+ * the lag_id in bits 0:9.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, sp_if_system_port, 0x08, 0, 16);
+
+/* reg_ritr_sp_if_vid
+ * VLAN ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, sp_if_vid, 0x18, 0, 12);
+
+static inline void mlxsw_reg_ritr_rif_pack(char *payload, u16 rif)
+{
+ MLXSW_REG_ZERO(ritr, payload);
+ mlxsw_reg_ritr_rif_set(payload, rif);
+}
+
+static inline void mlxsw_reg_ritr_sp_if_pack(char *payload, bool lag,
+ u16 system_port, u16 vid)
+{
+ mlxsw_reg_ritr_sp_if_lag_set(payload, lag);
+ mlxsw_reg_ritr_sp_if_system_port_set(payload, system_port);
+ mlxsw_reg_ritr_sp_if_vid_set(payload, vid);
+}
+
+static inline void mlxsw_reg_ritr_pack(char *payload, bool enable,
+ enum mlxsw_reg_ritr_if_type type,
+ u16 rif, u16 mtu, const char *mac)
+{
+ u8 op = enable ? MLXSW_REG_RITR_RIF_CREATE : MLXSW_REG_RITR_RIF_DEL;
+
+ MLXSW_REG_ZERO(ritr, payload);
+ mlxsw_reg_ritr_enable_set(payload, enable);
+ mlxsw_reg_ritr_ipv4_set(payload, 1);
+ mlxsw_reg_ritr_type_set(payload, type);
+ mlxsw_reg_ritr_op_set(payload, op);
+ mlxsw_reg_ritr_rif_set(payload, rif);
+ mlxsw_reg_ritr_ipv4_fe_set(payload, 1);
+ mlxsw_reg_ritr_lb_en_set(payload, 1);
+ mlxsw_reg_ritr_mtu_set(payload, mtu);
+ mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac);
+}
+
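Creating a RIF combines the generic pack with the type-specific setter.
A sketch for a FID interface (not part of the patch), assuming the mlxsw
core's MLXSW_REG() macro and mlxsw_reg_write() helper; the function name
is illustrative:

	static int example_rif_create(struct mlxsw_core *core, u16 rif,
				      u16 fid, u16 mtu, const char *mac)
	{
		char ritr_pl[MLXSW_REG_RITR_LEN];

		mlxsw_reg_ritr_pack(ritr_pl, true, MLXSW_REG_RITR_FID_IF,
				    rif, mtu, mac);
		mlxsw_reg_ritr_fid_set(ritr_pl, MLXSW_REG_RITR_FID_IF, fid);
		return mlxsw_reg_write(core, MLXSW_REG(ritr), ritr_pl);
	}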
+/* RATR - Router Adjacency Table Register
+ * --------------------------------------
+ * The RATR register is used to configure the Router Adjacency (next-hop)
+ * Table.
+ */
+#define MLXSW_REG_RATR_ID 0x8008
+#define MLXSW_REG_RATR_LEN 0x2C
+
+static const struct mlxsw_reg_info mlxsw_reg_ratr = {
+ .id = MLXSW_REG_RATR_ID,
+ .len = MLXSW_REG_RATR_LEN,
+};
+
+enum mlxsw_reg_ratr_op {
+ /* Read */
+ MLXSW_REG_RATR_OP_QUERY_READ = 0,
+ /* Read and clear activity */
+ MLXSW_REG_RATR_OP_QUERY_READ_CLEAR = 2,
+ /* Write Adjacency entry */
+ MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY = 1,
+ /* Write Adjacency entry only if the activity is cleared.
+ * The write may not succeed if the activity is set. There is no
+ * direct feedback whether the write has succeeded; however,
+ * the get will reveal the actual entry (SW can compare the get
+ * response to the set command).
+ */
+ MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY = 3,
+};
+
+/* reg_ratr_op
+ * Note that Write operation may also be used for updating
+ * counter_set_type and counter_index. In this case all other
+ * fields must not be updated.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ratr, op, 0x00, 28, 4);
+
+/* reg_ratr_v
+ * Valid bit. Indicates if the adjacency entry is valid.
+ * Note: the device may need some time before reusing an invalidated
+ * entry. During this time the entry cannot be reused. It is
+ * recommended to use another entry before reusing an invalidated
+ * entry (e.g. software can put it at the end of a free list).
+ * Trying to access an invalidated entry not yet cleared by the
+ * device results in a failure indicating "Try Again" status.
+ * When valid is '0', egress_router_interface, trap_action,
+ * adjacency_parameters and counters are reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ratr, v, 0x00, 24, 1);
+
+/* reg_ratr_a
+ * Activity. Set for new entries. Set if a packet lookup has hit on
+ * the specific entry. To clear the a bit, use "clear activity".
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ratr, a, 0x00, 16, 1);
+
+/* reg_ratr_adjacency_index_low
+ * Bits 15:0 of index into the adjacency table.
+ * For SwitchX and SwitchX-2, the adjacency table is linear and
+ * used for adjacency entries only.
+ * For Spectrum, the index is to the KVD linear.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ratr, adjacency_index_low, 0x04, 0, 16);
+
+/* reg_ratr_egress_router_interface
+ * Range is 0 .. cap_max_router_interfaces - 1
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ratr, egress_router_interface, 0x08, 0, 16);
+
+enum mlxsw_reg_ratr_trap_action {
+ MLXSW_REG_RATR_TRAP_ACTION_NOP,
+ MLXSW_REG_RATR_TRAP_ACTION_TRAP,
+ MLXSW_REG_RATR_TRAP_ACTION_MIRROR_TO_CPU,
+ MLXSW_REG_RATR_TRAP_ACTION_MIRROR,
+ MLXSW_REG_RATR_TRAP_ACTION_DISCARD_ERRORS,
+};
+
+/* reg_ratr_trap_action
+ * see mlxsw_reg_ratr_trap_action
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ratr, trap_action, 0x0C, 28, 4);
+
+enum mlxsw_reg_ratr_trap_id {
+ MLXSW_REG_RATR_TRAP_ID_RTR_EGRESS0 = 0,
+ MLXSW_REG_RATR_TRAP_ID_RTR_EGRESS1 = 1,
+};
+
+/* reg_ratr_adjacency_index_high
+ * Bits 23:16 of the adjacency_index.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ratr, adjacency_index_high, 0x0C, 16, 8);
+
+/* reg_ratr_trap_id
+ * Trap ID to be reported to CPU.
+ * Trap-ID is RTR_EGRESS0 or RTR_EGRESS1.
+ * For trap_action of NOP, MIRROR and DISCARD_ERROR, trap_id is reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ratr, trap_id, 0x0C, 0, 8);
+
+/* reg_ratr_eth_destination_mac
+ * MAC address of the destination next-hop.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, ratr, eth_destination_mac, 0x12, 6);
+
+static inline void
+mlxsw_reg_ratr_pack(char *payload,
+ enum mlxsw_reg_ratr_op op, bool valid,
+ u32 adjacency_index, u16 egress_rif)
+{
+ MLXSW_REG_ZERO(ratr, payload);
+ mlxsw_reg_ratr_op_set(payload, op);
+ mlxsw_reg_ratr_v_set(payload, valid);
+ mlxsw_reg_ratr_adjacency_index_low_set(payload, adjacency_index);
+ mlxsw_reg_ratr_adjacency_index_high_set(payload, adjacency_index >> 16);
+ mlxsw_reg_ratr_egress_router_interface_set(payload, egress_rif);
+}
+
+static inline void mlxsw_reg_ratr_eth_entry_pack(char *payload,
+ const char *dest_mac)
+{
+ mlxsw_reg_ratr_eth_destination_mac_memcpy_to(payload, dest_mac);
+}
+
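A sketch of writing one Ethernet adjacency entry (not part of the patch),
assuming the mlxsw core's MLXSW_REG() macro and mlxsw_reg_write() helper;
the function name is illustrative:

	static int example_adj_write(struct mlxsw_core *core, u32 adj_index,
				     u16 rif, const char *neigh_mac)
	{
		char ratr_pl[MLXSW_REG_RATR_LEN];

		mlxsw_reg_ratr_pack(ratr_pl,
				    MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
				    true, adj_index, rif);
		mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_mac);
		return mlxsw_reg_write(core, MLXSW_REG(ratr), ratr_pl);
	}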
+/* RALTA - Router Algorithmic LPM Tree Allocation Register
+ * -------------------------------------------------------
+ * RALTA is used to allocate the LPM trees of the SHSPM method.
+ */
+#define MLXSW_REG_RALTA_ID 0x8010
+#define MLXSW_REG_RALTA_LEN 0x04
+
+static const struct mlxsw_reg_info mlxsw_reg_ralta = {
+ .id = MLXSW_REG_RALTA_ID,
+ .len = MLXSW_REG_RALTA_LEN,
+};
+
+/* reg_ralta_op
+ * opcode (valid for Write, must be 0 on Read)
+ * 0 - allocate a tree
+ * 1 - deallocate a tree
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ralta, op, 0x00, 28, 2);
+
+enum mlxsw_reg_ralxx_protocol {
+ MLXSW_REG_RALXX_PROTOCOL_IPV4,
+ MLXSW_REG_RALXX_PROTOCOL_IPV6,
+};
+
+/* reg_ralta_protocol
+ * Protocol.
+ * Deallocation opcode: Reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralta, protocol, 0x00, 24, 4);
+
+/* reg_ralta_tree_id
+ * Tree identifier (numbered from 1..cap_shspm_max_trees-1), managed
+ * by software.
+ * Note that tree_id 0 is allocated for a default-route tree.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ralta, tree_id, 0x00, 0, 8);
+
+static inline void mlxsw_reg_ralta_pack(char *payload, bool alloc,
+ enum mlxsw_reg_ralxx_protocol protocol,
+ u8 tree_id)
+{
+ MLXSW_REG_ZERO(ralta, payload);
+ mlxsw_reg_ralta_op_set(payload, !alloc);
+ mlxsw_reg_ralta_protocol_set(payload, protocol);
+ mlxsw_reg_ralta_tree_id_set(payload, tree_id);
+}
+
+/* RALST - Router Algorithmic LPM Structure Tree Register
+ * ------------------------------------------------------
+ * RALST is used to set and query the structure of an LPM tree.
+ * The structure of the tree must be sorted as a sorted binary tree, while
+ * each node is a bin that is tagged as the length of the prefixes the lookup
+ * will refer to. Therefore, bin X refers to a set of entries with prefixes
+ * of X bits to match with the destination address. The bin 0 indicates
+ * the default action, when there is no match of any prefix.
+ */
+#define MLXSW_REG_RALST_ID 0x8011
+#define MLXSW_REG_RALST_LEN 0x104
+
+static const struct mlxsw_reg_info mlxsw_reg_ralst = {
+ .id = MLXSW_REG_RALST_ID,
+ .len = MLXSW_REG_RALST_LEN,
+};
+
+/* reg_ralst_root_bin
+ * The bin number of the root bin.
+ * 0 < root_bin <= (length of IP address)
+ * For a default-route tree, configure 0xff.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralst, root_bin, 0x00, 16, 8);
+
+/* reg_ralst_tree_id
+ * Tree identifier numbered from 1..(cap_shspm_max_trees-1).
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ralst, tree_id, 0x00, 0, 8);
+
+#define MLXSW_REG_RALST_BIN_NO_CHILD 0xff
+#define MLXSW_REG_RALST_BIN_OFFSET 0x04
+#define MLXSW_REG_RALST_BIN_COUNT 128
+
+/* reg_ralst_left_child_bin
+ * Holding the children of the bin according to the stored tree's structure.
+ * For trees composed of less than 4 blocks, the bins in excess are reserved.
+ * Note that tree_id 0 is allocated for a default-route tree; its bins
+ * are all 0xff.
+ * Access: RW
+ */
+MLXSW_ITEM16_INDEXED(reg, ralst, left_child_bin, 0x04, 8, 8, 0x02, 0x00, false);
+
+/* reg_ralst_right_child_bin
+ * Holding the children of the bin according to the stored tree's structure.
+ * For trees composed of less than 4 blocks, the bins in excess are reserved.
+ * Note that tree_id 0 is allocated for a default-route tree; its bins
+ * are all 0xff.
+ * Access: RW
+ */
+MLXSW_ITEM16_INDEXED(reg, ralst, right_child_bin, 0x04, 0, 8, 0x02, 0x00,
+ false);
+
+static inline void mlxsw_reg_ralst_pack(char *payload, u8 root_bin, u8 tree_id)
+{
+ MLXSW_REG_ZERO(ralst, payload);
+
+ /* Initialize all bins to have no left or right child */
+ memset(payload + MLXSW_REG_RALST_BIN_OFFSET,
+ MLXSW_REG_RALST_BIN_NO_CHILD, MLXSW_REG_RALST_BIN_COUNT * 2);
+
+ mlxsw_reg_ralst_root_bin_set(payload, root_bin);
+ mlxsw_reg_ralst_tree_id_set(payload, tree_id);
+}
+
+static inline void mlxsw_reg_ralst_bin_pack(char *payload, u8 bin_number,
+ u8 left_child_bin,
+ u8 right_child_bin)
+{
+ int bin_index = bin_number - 1;
+
+ mlxsw_reg_ralst_left_child_bin_set(payload, bin_index, left_child_bin);
+ mlxsw_reg_ralst_right_child_bin_set(payload, bin_index,
+ right_child_bin);
+}
+
+/* RALTB - Router Algorithmic LPM Tree Binding Register
+ * ----------------------------------------------------
+ * RALTB is used to bind virtual router and protocol to an allocated LPM tree.
+ */
+#define MLXSW_REG_RALTB_ID 0x8012
+#define MLXSW_REG_RALTB_LEN 0x04
+
+static const struct mlxsw_reg_info mlxsw_reg_raltb = {
+ .id = MLXSW_REG_RALTB_ID,
+ .len = MLXSW_REG_RALTB_LEN,
+};
+
+/* reg_raltb_virtual_router
+ * Virtual Router ID
+ * Range is 0..cap_max_virtual_routers-1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, raltb, virtual_router, 0x00, 16, 16);
+
+/* reg_raltb_protocol
+ * Protocol.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, raltb, protocol, 0x00, 12, 4);
+
+/* reg_raltb_tree_id
+ * Tree to be used for the {virtual_router, protocol}
+ * Tree identifier numbered from 1..(cap_shspm_max_trees-1).
+ * By default, all Unicast IPv4 and IPv6 are bound to tree_id 0.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, raltb, tree_id, 0x00, 0, 8);
+
+static inline void mlxsw_reg_raltb_pack(char *payload, u16 virtual_router,
+ enum mlxsw_reg_ralxx_protocol protocol,
+ u8 tree_id)
+{
+ MLXSW_REG_ZERO(raltb, payload);
+ mlxsw_reg_raltb_virtual_router_set(payload, virtual_router);
+ mlxsw_reg_raltb_protocol_set(payload, protocol);
+ mlxsw_reg_raltb_tree_id_set(payload, tree_id);
+}
+
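RALTA, RALST and RALTB are used in sequence: allocate a tree, describe
its shape, then bind a {virtual router, protocol} to it. A sketch (not
part of the patch) building a two-bin IPv4 tree, assuming the mlxsw
core's MLXSW_REG() macro and mlxsw_reg_write() helper; the function name
and the chosen bins are illustrative:

	static int example_lpm_tree_setup(struct mlxsw_core *core, u16 vr_id,
					  u8 tree_id)
	{
		char ralta_pl[MLXSW_REG_RALTA_LEN];
		char ralst_pl[MLXSW_REG_RALST_LEN];
		char raltb_pl[MLXSW_REG_RALTB_LEN];
		int err;

		mlxsw_reg_ralta_pack(ralta_pl, true,
				     MLXSW_REG_RALXX_PROTOCOL_IPV4, tree_id);
		err = mlxsw_reg_write(core, MLXSW_REG(ralta), ralta_pl);
		if (err)
			return err;

		/* root bin /24 with a single /32 child */
		mlxsw_reg_ralst_pack(ralst_pl, 24, tree_id);
		mlxsw_reg_ralst_bin_pack(ralst_pl, 24, 32,
					 MLXSW_REG_RALST_BIN_NO_CHILD);
		err = mlxsw_reg_write(core, MLXSW_REG(ralst), ralst_pl);
		if (err)
			return err;

		mlxsw_reg_raltb_pack(raltb_pl, vr_id,
				     MLXSW_REG_RALXX_PROTOCOL_IPV4, tree_id);
		return mlxsw_reg_write(core, MLXSW_REG(raltb), raltb_pl);
	}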
+/* RALUE - Router Algorithmic LPM Unicast Entry Register
+ * -----------------------------------------------------
+ * RALUE is used to configure and query LPM entries that serve
+ * the Unicast protocols.
+ */
+#define MLXSW_REG_RALUE_ID 0x8013
+#define MLXSW_REG_RALUE_LEN 0x38
+
+static const struct mlxsw_reg_info mlxsw_reg_ralue = {
+ .id = MLXSW_REG_RALUE_ID,
+ .len = MLXSW_REG_RALUE_LEN,
+};
+
+/* reg_ralue_protocol
+ * Protocol.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ralue, protocol, 0x00, 24, 4);
+
+enum mlxsw_reg_ralue_op {
+ /* Read operation. If entry doesn't exist, the operation fails. */
+ MLXSW_REG_RALUE_OP_QUERY_READ = 0,
+ /* Clear on read operation. Used to read entry and
+ * clear Activity bit.
+ */
+ MLXSW_REG_RALUE_OP_QUERY_CLEAR = 1,
+ /* Write operation. Used to write a new entry to the table. All RW
+ * fields are written for new entry. Activity bit is set
+ * for new entries.
+ */
+ MLXSW_REG_RALUE_OP_WRITE_WRITE = 0,
+ /* Update operation. Used to update an existing route entry and
+ * only update the RW fields that are detailed in the field
+ * op_u_mask. If entry doesn't exist, the operation fails.
+ */
+ MLXSW_REG_RALUE_OP_WRITE_UPDATE = 1,
+ /* Clear activity. The Activity bit (the field a) is cleared
+ * for the entry.
+ */
+ MLXSW_REG_RALUE_OP_WRITE_CLEAR = 2,
+ /* Delete operation. Used to delete an existing entry. If entry
+ * doesn't exist, the operation fails.
+ */
+ MLXSW_REG_RALUE_OP_WRITE_DELETE = 3,
+};
+
+/* reg_ralue_op
+ * Operation.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, ralue, op, 0x00, 20, 3);
+
+/* reg_ralue_a
+ * Activity. Set for new entries. Set if a packet lookup has hit on the
+ * specific entry, only if the entry is a route. To clear the a bit, use
+ * "clear activity" op.
+ * Enabled by activity_dis in RGCR
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, ralue, a, 0x00, 16, 1);
+
+/* reg_ralue_virtual_router
+ * Virtual Router ID
+ * Range is 0..cap_max_virtual_routers-1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ralue, virtual_router, 0x04, 16, 16);
+
+#define MLXSW_REG_RALUE_OP_U_MASK_ENTRY_TYPE BIT(0)
+#define MLXSW_REG_RALUE_OP_U_MASK_BMP_LEN BIT(1)
+#define MLXSW_REG_RALUE_OP_U_MASK_ACTION BIT(2)
+
+/* reg_ralue_op_u_mask
+ * Opcode update mask.
+ * A bitmask of the fields that should be updated. Only valid for the
+ * update opcode; reserved otherwise (including on read operations).
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, ralue, op_u_mask, 0x04, 8, 3);
+
+/* reg_ralue_prefix_len
+ * Number of bits in the prefix of the LPM route.
+ * Note that for IPv6 prefixes, if prefix_len>64 the entry consumes
+ * two entries in the physical HW table.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ralue, prefix_len, 0x08, 0, 8);
+
+/* reg_ralue_dip*
+ * The prefix of the route or of the marker that the object of the LPM
+ * is compared with. The most significant bits of the dip are the prefix.
+ * The least significant bits must be '0' if the prefix_len is smaller
+ * than 128 for IPv6 or smaller than 32 for IPv4.
+ * IPv4 address uses bits dip[31:0] and bits dip[127:32] are reserved.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, ralue, dip4, 0x18, 0, 32);
+
+enum mlxsw_reg_ralue_entry_type {
+ MLXSW_REG_RALUE_ENTRY_TYPE_MARKER_ENTRY = 1,
+ MLXSW_REG_RALUE_ENTRY_TYPE_ROUTE_ENTRY = 2,
+ MLXSW_REG_RALUE_ENTRY_TYPE_MARKER_AND_ROUTE_ENTRY = 3,
+};
+
+/* reg_ralue_entry_type
+ * Entry type.
+ * Note - for Marker entries, the action_type and action fields are reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, entry_type, 0x1C, 30, 2);
+
+/* reg_ralue_bmp_len
+ * The best match prefix length in the case that there is no match for
+ * longer prefixes.
+ * If (entry_type != MARKER_ENTRY), bmp_len must be equal to prefix_len.
+ * Note that for any update operation that modifies entry_type, this
+ * field must be set.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, bmp_len, 0x1C, 16, 8);
+
+enum mlxsw_reg_ralue_action_type {
+ MLXSW_REG_RALUE_ACTION_TYPE_REMOTE,
+ MLXSW_REG_RALUE_ACTION_TYPE_LOCAL,
+ MLXSW_REG_RALUE_ACTION_TYPE_IP2ME,
+};
+
+/* reg_ralue_action_type
+ * Action Type
+ * Indicates how the IP address is connected.
+ * It can be connected to a local subnet through local_erif or can be
+ * on a remote subnet connected through a next-hop router,
+ * or transmitted to the CPU.
+ * Reserved when entry_type = MARKER_ENTRY
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, action_type, 0x1C, 0, 2);
+
+enum mlxsw_reg_ralue_trap_action {
+ MLXSW_REG_RALUE_TRAP_ACTION_NOP,
+ MLXSW_REG_RALUE_TRAP_ACTION_TRAP,
+ MLXSW_REG_RALUE_TRAP_ACTION_MIRROR_TO_CPU,
+ MLXSW_REG_RALUE_TRAP_ACTION_MIRROR,
+ MLXSW_REG_RALUE_TRAP_ACTION_DISCARD_ERROR,
+};
+
+/* reg_ralue_trap_action
+ * Trap action.
+ * For IP2ME action, only NOP and MIRROR are possible.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, trap_action, 0x20, 28, 4);
+
+/* reg_ralue_trap_id
+ * Trap ID to be reported to CPU.
+ * Trap ID is RTR_INGRESS0 or RTR_INGRESS1.
+ * For trap_action of NOP, MIRROR and DISCARD_ERROR, trap_id is reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, trap_id, 0x20, 0, 9);
+
+/* reg_ralue_adjacency_index
+ * Points to the first entry of the group-based ECMP.
+ * Only relevant in case of REMOTE action.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, adjacency_index, 0x24, 0, 24);
+
+/* reg_ralue_ecmp_size
+ * Amount of sequential entries starting
+ * from the adjacency_index (the number of ECMPs).
+ * The valid range is 1-64, 512, 1024, 2048 and 4096.
+ * Reserved when trap_action is TRAP or DISCARD_ERROR.
+ * Only relevant in case of REMOTE action.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, ecmp_size, 0x28, 0, 13);
+
+/* reg_ralue_local_erif
+ * Egress Router Interface.
+ * Only relevant in case of LOCAL action.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, local_erif, 0x24, 0, 16);
+
+/* reg_ralue_v
+ * Valid bit for the tunnel_ptr field.
+ * If valid = 0 then trap to CPU as IP2ME trap ID.
+ * If valid = 1 and the packet format allows NVE or IPinIP tunnel
+ * decapsulation then tunnel decapsulation is done.
+ * If valid = 1 and packet format does not allow NVE or IPinIP tunnel
+ * decapsulation then trap as IP2ME trap ID.
+ * Only relevant in case of IP2ME action.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, v, 0x24, 31, 1);
+
+/* reg_ralue_tunnel_ptr
+ * Tunnel Pointer for NVE or IPinIP tunnel decapsulation.
+ * For Spectrum, pointer to KVD Linear.
+ * Only relevant in case of IP2ME action.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ralue, tunnel_ptr, 0x24, 0, 24);
+
+static inline void mlxsw_reg_ralue_pack(char *payload,
+ enum mlxsw_reg_ralxx_protocol protocol,
+ enum mlxsw_reg_ralue_op op,
+ u16 virtual_router, u8 prefix_len)
+{
+ MLXSW_REG_ZERO(ralue, payload);
+ mlxsw_reg_ralue_protocol_set(payload, protocol);
+ mlxsw_reg_ralue_op_set(payload, op);
+ mlxsw_reg_ralue_virtual_router_set(payload, virtual_router);
+ mlxsw_reg_ralue_prefix_len_set(payload, prefix_len);
+ mlxsw_reg_ralue_entry_type_set(payload,
+ MLXSW_REG_RALUE_ENTRY_TYPE_ROUTE_ENTRY);
+ mlxsw_reg_ralue_bmp_len_set(payload, prefix_len);
+}
+
+static inline void mlxsw_reg_ralue_pack4(char *payload,
+ enum mlxsw_reg_ralxx_protocol protocol,
+ enum mlxsw_reg_ralue_op op,
+ u16 virtual_router, u8 prefix_len,
+ u32 dip)
+{
+ mlxsw_reg_ralue_pack(payload, protocol, op, virtual_router, prefix_len);
+ mlxsw_reg_ralue_dip4_set(payload, dip);
+}
+
+static inline void
+mlxsw_reg_ralue_act_remote_pack(char *payload,
+ enum mlxsw_reg_ralue_trap_action trap_action,
+ u16 trap_id, u32 adjacency_index, u16 ecmp_size)
+{
+ mlxsw_reg_ralue_action_type_set(payload,
+ MLXSW_REG_RALUE_ACTION_TYPE_REMOTE);
+ mlxsw_reg_ralue_trap_action_set(payload, trap_action);
+ mlxsw_reg_ralue_trap_id_set(payload, trap_id);
+ mlxsw_reg_ralue_adjacency_index_set(payload, adjacency_index);
+ mlxsw_reg_ralue_ecmp_size_set(payload, ecmp_size);
+}
+
+static inline void
+mlxsw_reg_ralue_act_local_pack(char *payload,
+ enum mlxsw_reg_ralue_trap_action trap_action,
+ u16 trap_id, u16 local_erif)
+{
+ mlxsw_reg_ralue_action_type_set(payload,
+ MLXSW_REG_RALUE_ACTION_TYPE_LOCAL);
+ mlxsw_reg_ralue_trap_action_set(payload, trap_action);
+ mlxsw_reg_ralue_trap_id_set(payload, trap_id);
+ mlxsw_reg_ralue_local_erif_set(payload, local_erif);
+}
+
+static inline void
+mlxsw_reg_ralue_act_ip2me_pack(char *payload)
+{
+ mlxsw_reg_ralue_action_type_set(payload,
+ MLXSW_REG_RALUE_ACTION_TYPE_IP2ME);
+}
+
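Programming a route then splits into the generic pack plus one action
pack. A sketch writing an IPv4 route with a LOCAL action (not part of
the patch), assuming the mlxsw core's MLXSW_REG() macro and
mlxsw_reg_write() helper; the function name is illustrative:

	static int example_route_write(struct mlxsw_core *core, u16 vr_id,
				       u32 dip, u8 prefix_len, u16 erif)
	{
		char ralue_pl[MLXSW_REG_RALUE_LEN];

		mlxsw_reg_ralue_pack4(ralue_pl, MLXSW_REG_RALXX_PROTOCOL_IPV4,
				      MLXSW_REG_RALUE_OP_WRITE_WRITE, vr_id,
				      prefix_len, dip);
		mlxsw_reg_ralue_act_local_pack(ralue_pl,
					       MLXSW_REG_RALUE_TRAP_ACTION_NOP,
					       0, erif);
		return mlxsw_reg_write(core, MLXSW_REG(ralue), ralue_pl);
	}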
+/* RAUHT - Router Algorithmic LPM Unicast Host Table Register
+ * ----------------------------------------------------------
+ * The RAUHT register is used to configure and query the Unicast Host table in
+ * devices that implement the Algorithmic LPM.
+ */
+#define MLXSW_REG_RAUHT_ID 0x8014
+#define MLXSW_REG_RAUHT_LEN 0x74
+
+static const struct mlxsw_reg_info mlxsw_reg_rauht = {
+ .id = MLXSW_REG_RAUHT_ID,
+ .len = MLXSW_REG_RAUHT_LEN,
+};
+
+enum mlxsw_reg_rauht_type {
+ MLXSW_REG_RAUHT_TYPE_IPV4,
+ MLXSW_REG_RAUHT_TYPE_IPV6,
+};
+
+/* reg_rauht_type
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rauht, type, 0x00, 24, 2);
+
+enum mlxsw_reg_rauht_op {
+ MLXSW_REG_RAUHT_OP_QUERY_READ = 0,
+ /* Read operation */
+ MLXSW_REG_RAUHT_OP_QUERY_CLEAR_ON_READ = 1,
+ /* Clear on read operation. Used to read entry and clear
+ * activity bit.
+ */
+ MLXSW_REG_RAUHT_OP_WRITE_ADD = 0,
+ /* Add. Used to write a new entry to the table. All R/W fields are
+ * relevant for new entry. Activity bit is set for new entries.
+ */
+ MLXSW_REG_RAUHT_OP_WRITE_UPDATE = 1,
+ /* Update action. Used to update an existing route entry and
+ * only update the following fields:
+ * trap_action, trap_id, mac, counter_set_type, counter_index
+ */
+ MLXSW_REG_RAUHT_OP_WRITE_CLEAR_ACTIVITY = 2,
+ /* Clear activity. A bit is cleared for the entry. */
+ MLXSW_REG_RAUHT_OP_WRITE_DELETE = 3,
+ /* Delete entry */
+ MLXSW_REG_RAUHT_OP_WRITE_DELETE_ALL = 4,
+ /* Delete all host entries on a RIF. In this command, dip
+ * field is reserved.
+ */
+};
+
+/* reg_rauht_op
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, rauht, op, 0x00, 20, 3);
+
+/* reg_rauht_a
+ * Activity. Set for new entries. Set if a packet lookup has hit on
+ * the specific entry.
+ * To clear the a bit, use "clear activity" op.
+ * Enabled by activity_dis in RGCR
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, rauht, a, 0x00, 16, 1);
+
+/* reg_rauht_rif
+ * Router Interface
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rauht, rif, 0x00, 0, 16);
+
+/* reg_rauht_dip*
+ * Destination address.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rauht, dip4, 0x1C, 0x0, 32);
+
+enum mlxsw_reg_rauht_trap_action {
+ MLXSW_REG_RAUHT_TRAP_ACTION_NOP,
+ MLXSW_REG_RAUHT_TRAP_ACTION_TRAP,
+ MLXSW_REG_RAUHT_TRAP_ACTION_MIRROR_TO_CPU,
+ MLXSW_REG_RAUHT_TRAP_ACTION_MIRROR,
+ MLXSW_REG_RAUHT_TRAP_ACTION_DISCARD_ERRORS,
+};
+
+/* reg_rauht_trap_action
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rauht, trap_action, 0x60, 28, 4);
+
+enum mlxsw_reg_rauht_trap_id {
+ MLXSW_REG_RAUHT_TRAP_ID_RTR_EGRESS0,
+ MLXSW_REG_RAUHT_TRAP_ID_RTR_EGRESS1,
+};
+
+/* reg_rauht_trap_id
+ * Trap ID to be reported to CPU.
+ * Trap-ID is RTR_EGRESS0 or RTR_EGRESS1.
+ * For trap_action of NOP, MIRROR and DISCARD_ERROR,
+ * trap_id is reserved.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rauht, trap_id, 0x60, 0, 9);
+
+/* reg_rauht_counter_set_type
+ * Counter set type for flow counters
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rauht, counter_set_type, 0x68, 24, 8);
+
+/* reg_rauht_counter_index
+ * Counter index for flow counters
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, rauht, counter_index, 0x68, 0, 24);
+
+/* reg_rauht_mac
+ * MAC address.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, rauht, mac, 0x6E, 6);
+
+static inline void mlxsw_reg_rauht_pack(char *payload,
+ enum mlxsw_reg_rauht_op op, u16 rif,
+ const char *mac)
+{
+ MLXSW_REG_ZERO(rauht, payload);
+ mlxsw_reg_rauht_op_set(payload, op);
+ mlxsw_reg_rauht_rif_set(payload, rif);
+ mlxsw_reg_rauht_mac_memcpy_to(payload, mac);
+}
+
+static inline void mlxsw_reg_rauht_pack4(char *payload,
+ enum mlxsw_reg_rauht_op op, u16 rif,
+ const char *mac, u32 dip)
+{
+ mlxsw_reg_rauht_pack(payload, op, rif, mac);
+ mlxsw_reg_rauht_dip4_set(payload, dip);
+}
+
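A sketch adding one IPv4 neighbour entry (not part of the patch),
assuming the mlxsw core's MLXSW_REG() macro and mlxsw_reg_write()
helper; the function name is illustrative:

	static int example_neigh_add(struct mlxsw_core *core, u16 rif,
				     const char *mac, u32 dip)
	{
		char rauht_pl[MLXSW_REG_RAUHT_LEN];

		mlxsw_reg_rauht_pack4(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_ADD,
				      rif, mac, dip);
		return mlxsw_reg_write(core, MLXSW_REG(rauht), rauht_pl);
	}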
+/* RALEU - Router Algorithmic LPM ECMP Update Register
+ * ---------------------------------------------------
+ * The register enables updating the ECMP section in the action for multiple
+ * LPM Unicast entries in a single operation. The update is applied to
+ * all entries of a {virtual router, protocol} tuple that use the same
+ * ECMP group.
+ */
+#define MLXSW_REG_RALEU_ID 0x8015
+#define MLXSW_REG_RALEU_LEN 0x28
+
+static const struct mlxsw_reg_info mlxsw_reg_raleu = {
+ .id = MLXSW_REG_RALEU_ID,
+ .len = MLXSW_REG_RALEU_LEN,
+};
+
+/* reg_raleu_protocol
+ * Protocol.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, raleu, protocol, 0x00, 24, 4);
+
+/* reg_raleu_virtual_router
+ * Virtual Router ID
+ * Range is 0..cap_max_virtual_routers-1
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, raleu, virtual_router, 0x00, 0, 16);
+
+/* reg_raleu_adjacency_index
+ * Adjacency Index used for matching on the existing entries.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, raleu, adjacency_index, 0x10, 0, 24);
+
+/* reg_raleu_ecmp_size
+ * ECMP Size used for matching on the existing entries.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, raleu, ecmp_size, 0x14, 0, 13);
+
+/* reg_raleu_new_adjacency_index
+ * New Adjacency Index.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, raleu, new_adjacency_index, 0x20, 0, 24);
+
+/* reg_raleu_new_ecmp_size
+ * New ECMP Size.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, raleu, new_ecmp_size, 0x24, 0, 13);
+
+static inline void mlxsw_reg_raleu_pack(char *payload,
+ enum mlxsw_reg_ralxx_protocol protocol,
+ u16 virtual_router,
+ u32 adjacency_index, u16 ecmp_size,
+ u32 new_adjacency_index,
+ u16 new_ecmp_size)
+{
+ MLXSW_REG_ZERO(raleu, payload);
+ mlxsw_reg_raleu_protocol_set(payload, protocol);
+ mlxsw_reg_raleu_virtual_router_set(payload, virtual_router);
+ mlxsw_reg_raleu_adjacency_index_set(payload, adjacency_index);
+ mlxsw_reg_raleu_ecmp_size_set(payload, ecmp_size);
+ mlxsw_reg_raleu_new_adjacency_index_set(payload, new_adjacency_index);
+ mlxsw_reg_raleu_new_ecmp_size_set(payload, new_ecmp_size);
+}
+
+/* RAUHTD - Router Algorithmic LPM Unicast Host Table Dump Register
+ * ----------------------------------------------------------------
+ * The RAUHTD register allows dumping entries from the Router Unicast Host
+ * Table. For a given session an entry is dumped no more than one time. The
+ * first RAUHTD access after reset is a new session. A session ends when the
+ * num_rec response is smaller than num_rec request or for IPv4 when the
+ * num_entries is smaller than 4. The clear activity affect the current session
+ * or the last session if a new session has not started.
+ */
+#define MLXSW_REG_RAUHTD_ID 0x8018
+#define MLXSW_REG_RAUHTD_BASE_LEN 0x20
+#define MLXSW_REG_RAUHTD_REC_LEN 0x20
+#define MLXSW_REG_RAUHTD_REC_MAX_NUM 32
+#define MLXSW_REG_RAUHTD_LEN (MLXSW_REG_RAUHTD_BASE_LEN + \
+ MLXSW_REG_RAUHTD_REC_MAX_NUM * MLXSW_REG_RAUHTD_REC_LEN)
+#define MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC 4
+
+static const struct mlxsw_reg_info mlxsw_reg_rauhtd = {
+ .id = MLXSW_REG_RAUHTD_ID,
+ .len = MLXSW_REG_RAUHTD_LEN,
+};
+
+#define MLXSW_REG_RAUHTD_FILTER_A BIT(0)
+#define MLXSW_REG_RAUHTD_FILTER_RIF BIT(3)
+
+/* reg_rauhtd_filter_fields
+ * If a bit is '0', the relevant field is ignored and the dump is done
+ * regardless of the field value.
+ * Bit0 - filter by activity: entry_a
+ * Bit3 - filter by entry rif: entry_rif
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rauhtd, filter_fields, 0x00, 0, 8);
+
+enum mlxsw_reg_rauhtd_op {
+ MLXSW_REG_RAUHTD_OP_DUMP,
+ MLXSW_REG_RAUHTD_OP_DUMP_AND_CLEAR,
+};
+
+/* reg_rauhtd_op
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, rauhtd, op, 0x04, 24, 2);
+
+/* reg_rauhtd_num_rec
+ * At request: number of records requested
+ * At response: number of records dumped
+ * For IPv4, each record has 4 entries at request and up to 4 entries
+ * at response
+ * Range is 0..MLXSW_REG_RAUHTD_REC_MAX_NUM
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rauhtd, num_rec, 0x04, 0, 8);
+
+/* reg_rauhtd_entry_a
+ * Dump only if activity has value of entry_a
+ * Reserved if filter_fields bit0 is '0'
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rauhtd, entry_a, 0x08, 16, 1);
+
+enum mlxsw_reg_rauhtd_type {
+ MLXSW_REG_RAUHTD_TYPE_IPV4,
+ MLXSW_REG_RAUHTD_TYPE_IPV6,
+};
+
+/* reg_rauhtd_type
+ * Dump only if record type is:
+ * 0 - IPv4
+ * 1 - IPv6
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rauhtd, type, 0x08, 0, 4);
+
+/* reg_rauhtd_entry_rif
+ * Dump only if RIF has value of entry_rif
+ * Reserved if filter_fields bit3 is '0'
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rauhtd, entry_rif, 0x0C, 0, 16);
+
+static inline void mlxsw_reg_rauhtd_pack(char *payload,
+ enum mlxsw_reg_rauhtd_type type)
+{
+ MLXSW_REG_ZERO(rauhtd, payload);
+ mlxsw_reg_rauhtd_filter_fields_set(payload, MLXSW_REG_RAUHTD_FILTER_A);
+ mlxsw_reg_rauhtd_op_set(payload, MLXSW_REG_RAUHTD_OP_DUMP_AND_CLEAR);
+ mlxsw_reg_rauhtd_num_rec_set(payload, MLXSW_REG_RAUHTD_REC_MAX_NUM);
+ mlxsw_reg_rauhtd_entry_a_set(payload, 1);
+ mlxsw_reg_rauhtd_type_set(payload, type);
+}
+
+/* reg_rauhtd_ipv4_rec_num_entries
+ * Number of valid entries in this record:
+ * 0 - 1 valid entry
+ * 1 - 2 valid entries
+ * 2 - 3 valid entries
+ * 3 - 4 valid entries
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_rec_num_entries,
+ MLXSW_REG_RAUHTD_BASE_LEN, 28, 2,
+ MLXSW_REG_RAUHTD_REC_LEN, 0x00, false);
+
+/* reg_rauhtd_rec_type
+ * Record type.
+ * 0 - IPv4
+ * 1 - IPv6
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, rauhtd, rec_type, MLXSW_REG_RAUHTD_BASE_LEN, 24, 2,
+ MLXSW_REG_RAUHTD_REC_LEN, 0x00, false);
+
+#define MLXSW_REG_RAUHTD_IPV4_ENT_LEN 0x8
+
+/* reg_rauhtd_ipv4_ent_a
+ * Activity. Set for new entries. Set if a packet lookup has hit on the
+ * specific entry.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_ent_a, MLXSW_REG_RAUHTD_BASE_LEN, 16, 1,
+ MLXSW_REG_RAUHTD_IPV4_ENT_LEN, 0x00, false);
+
+/* reg_rauhtd_ipv4_ent_rif
+ * Router interface.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_ent_rif, MLXSW_REG_RAUHTD_BASE_LEN, 0,
+ 16, MLXSW_REG_RAUHTD_IPV4_ENT_LEN, 0x00, false);
+
+/* reg_rauhtd_ipv4_ent_dip
+ * Destination IPv4 address.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, rauhtd, ipv4_ent_dip, MLXSW_REG_RAUHTD_BASE_LEN, 0,
+ 32, MLXSW_REG_RAUHTD_IPV4_ENT_LEN, 0x04, false);
+
+static inline void mlxsw_reg_rauhtd_ent_ipv4_unpack(char *payload,
+ int ent_index, u16 *p_rif,
+ u32 *p_dip)
+{
+ *p_rif = mlxsw_reg_rauhtd_ipv4_ent_rif_get(payload, ent_index);
+ *p_dip = mlxsw_reg_rauhtd_ipv4_ent_dip_get(payload, ent_index);
+}
+
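A sketch of one dump iteration (not part of the patch), assuming the
mlxsw core's MLXSW_REG() macro and mlxsw_reg_query() helper; the payload
is heap-allocated since MLXSW_REG_RAUHTD_LEN is over 1 KB, and the
function name is illustrative:

	static int example_neigh_activity_dump(struct mlxsw_core *core)
	{
		char *rauhtd_pl;
		int i, j, err;

		rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
		if (!rauhtd_pl)
			return -ENOMEM;

		mlxsw_reg_rauhtd_pack(rauhtd_pl, MLXSW_REG_RAUHTD_TYPE_IPV4);
		err = mlxsw_reg_query(core, MLXSW_REG(rauhtd), rauhtd_pl);
		if (err)
			goto out;

		for (i = 0; i < mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl); i++) {
			/* the last record may hold fewer than 4 entries */
			int n = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
									  i) + 1;

			for (j = 0; j < n; j++) {
				u16 rif;
				u32 dip;

				mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl,
						i * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + j,
						&rif, &dip);
				/* the {rif, dip} neighbour was active */
			}
		}
	out:
		kfree(rauhtd_pl);
		return err;
	}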
/* MFCR - Management Fan Control Register
* --------------------------------------
* This register controls the settings of the Fan Speed PWM mechanism.
@@ -2929,6 +4644,123 @@ static inline void mlxsw_reg_mtmp_unpack(char *payload, unsigned int *p_temp,
mlxsw_reg_mtmp_sensor_name_memcpy_from(payload, sensor_name);
}
+/* MPAT - Monitoring Port Analyzer Table
+ * -------------------------------------
+ * MPAT Register is used to query and configure the Switch Port Analyzer
+ * Table. For an enabled analyzer, no field other than e (enable) can be
+ * modified.
+ */
+#define MLXSW_REG_MPAT_ID 0x901A
+#define MLXSW_REG_MPAT_LEN 0x78
+
+static const struct mlxsw_reg_info mlxsw_reg_mpat = {
+ .id = MLXSW_REG_MPAT_ID,
+ .len = MLXSW_REG_MPAT_LEN,
+};
+
+/* reg_mpat_pa_id
+ * Port Analyzer ID.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mpat, pa_id, 0x00, 28, 4);
+
+/* reg_mpat_system_port
+ * A unique port identifier for the final destination of the packet.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, system_port, 0x00, 0, 16);
+
+/* reg_mpat_e
+ * Enable. Indicates that the Port Analyzer is enabled.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, e, 0x04, 31, 1);
+
+/* reg_mpat_qos
+ * Quality Of Service Mode.
+ * 0: CONFIGURED - QoS parameters (Switch Priority, and encapsulation
+ * PCP, DEI, DSCP or VL) are configured.
+ * 1: MAINTAIN - QoS parameters (Switch Priority, Color) are the
+ * same as in the original packet that has triggered the mirroring. For
+ * SPAN, the pcp and dei are also maintained.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, qos, 0x04, 26, 1);
+
+/* reg_mpat_be
+ * Best effort mode. Indicates that mirroring traffic should not cause
+ * packet drop or back pressure, but the mirrored packets may be
+ * discarded instead. Mirrored packets are forwarded in a best-effort
+ * manner.
+ * 0: Do not discard mirrored packets
+ * 1: Discard mirrored packets if causing congestion
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, be, 0x04, 25, 1);
+
+static inline void mlxsw_reg_mpat_pack(char *payload, u8 pa_id,
+ u16 system_port, bool e)
+{
+ MLXSW_REG_ZERO(mpat, payload);
+ mlxsw_reg_mpat_pa_id_set(payload, pa_id);
+ mlxsw_reg_mpat_system_port_set(payload, system_port);
+ mlxsw_reg_mpat_e_set(payload, e);
+ mlxsw_reg_mpat_qos_set(payload, 1);
+ mlxsw_reg_mpat_be_set(payload, 1);
+}
+
+/* MPAR - Monitoring Port Analyzer Register
+ * ----------------------------------------
+ * MPAR register is used to query and configure the port analyzer port mirroring
+ * properties.
+ */
+#define MLXSW_REG_MPAR_ID 0x901B
+#define MLXSW_REG_MPAR_LEN 0x08
+
+static const struct mlxsw_reg_info mlxsw_reg_mpar = {
+ .id = MLXSW_REG_MPAR_ID,
+ .len = MLXSW_REG_MPAR_LEN,
+};
+
+/* reg_mpar_local_port
+ * The local port to mirror the packets from.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mpar, local_port, 0x00, 16, 8);
+
+enum mlxsw_reg_mpar_i_e {
+ MLXSW_REG_MPAR_TYPE_EGRESS,
+ MLXSW_REG_MPAR_TYPE_INGRESS,
+};
+
+/* reg_mpar_i_e
+ * Ingress/Egress
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mpar, i_e, 0x00, 0, 4);
+
+/* reg_mpar_enable
+ * Enable mirroring
+ * By default, port mirroring is disabled for all ports.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpar, enable, 0x04, 31, 1);
+
+/* reg_mpar_pa_id
+ * Port Analyzer ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpar, pa_id, 0x04, 0, 4);
+
+static inline void mlxsw_reg_mpar_pack(char *payload, u8 local_port,
+ enum mlxsw_reg_mpar_i_e i_e,
+ bool enable, u8 pa_id)
+{
+ MLXSW_REG_ZERO(mpar, payload);
+ mlxsw_reg_mpar_local_port_set(payload, local_port);
+ mlxsw_reg_mpar_enable_set(payload, enable);
+ mlxsw_reg_mpar_i_e_set(payload, i_e);
+ mlxsw_reg_mpar_pa_id_set(payload, pa_id);
+}
+
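Setting up a mirror session takes both registers: MPAT defines the
analyzer (the mirror destination) and MPAR binds a mirrored port to it.
A sketch (not part of the patch), assuming the mlxsw core's MLXSW_REG()
macro and mlxsw_reg_write() helper; the function name is illustrative:

	static int example_span_set(struct mlxsw_core *core, u8 from_port,
				    u16 to_port, u8 pa_id)
	{
		char mpat_pl[MLXSW_REG_MPAT_LEN];
		char mpar_pl[MLXSW_REG_MPAR_LEN];
		int err;

		mlxsw_reg_mpat_pack(mpat_pl, pa_id, to_port, true);
		err = mlxsw_reg_write(core, MLXSW_REG(mpat), mpat_pl);
		if (err)
			return err;

		mlxsw_reg_mpar_pack(mpar_pl, from_port,
				    MLXSW_REG_MPAR_TYPE_INGRESS, true, pa_id);
		return mlxsw_reg_write(core, MLXSW_REG(mpar), mpar_pl);
	}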
/* MLCR - Management LED Control Register
* --------------------------------------
* Controls the system LEDs.
@@ -2985,9 +4817,10 @@ static const struct mlxsw_reg_info mlxsw_reg_sbpr = {
.len = MLXSW_REG_SBPR_LEN,
};
-enum mlxsw_reg_sbpr_dir {
- MLXSW_REG_SBPR_DIR_INGRESS,
- MLXSW_REG_SBPR_DIR_EGRESS,
+/* shared direction enum for SBPR, SBCM, SBPM */
+enum mlxsw_reg_sbxx_dir {
+ MLXSW_REG_SBXX_DIR_INGRESS,
+ MLXSW_REG_SBXX_DIR_EGRESS,
};
/* reg_sbpr_dir
@@ -3020,7 +4853,7 @@ enum mlxsw_reg_sbpr_mode {
MLXSW_ITEM32(reg, sbpr, mode, 0x08, 0, 4);
static inline void mlxsw_reg_sbpr_pack(char *payload, u8 pool,
- enum mlxsw_reg_sbpr_dir dir,
+ enum mlxsw_reg_sbxx_dir dir,
enum mlxsw_reg_sbpr_mode mode, u32 size)
{
MLXSW_REG_ZERO(sbpr, payload);
@@ -3062,11 +4895,6 @@ MLXSW_ITEM32(reg, sbcm, local_port, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, sbcm, pg_buff, 0x00, 8, 6);
-enum mlxsw_reg_sbcm_dir {
- MLXSW_REG_SBCM_DIR_INGRESS,
- MLXSW_REG_SBCM_DIR_EGRESS,
-};
-
/* reg_sbcm_dir
* Direction.
* Access: Index
@@ -3079,6 +4907,10 @@ MLXSW_ITEM32(reg, sbcm, dir, 0x00, 0, 2);
*/
MLXSW_ITEM32(reg, sbcm, min_buff, 0x18, 0, 24);
+/* shared max_buff limits for dynamic threshold for SBCM, SBPM */
+#define MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN 1
+#define MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX 14
+
/* reg_sbcm_max_buff
* When the pool associated to the port-pg/tclass is configured to
* static, Maximum buffer size for the limiter configured in cells.
@@ -3099,7 +4931,7 @@ MLXSW_ITEM32(reg, sbcm, max_buff, 0x1C, 0, 24);
MLXSW_ITEM32(reg, sbcm, pool, 0x24, 0, 4);
static inline void mlxsw_reg_sbcm_pack(char *payload, u8 local_port, u8 pg_buff,
- enum mlxsw_reg_sbcm_dir dir,
+ enum mlxsw_reg_sbxx_dir dir,
u32 min_buff, u32 max_buff, u8 pool)
{
MLXSW_REG_ZERO(sbcm, payload);
@@ -3111,8 +4943,8 @@ static inline void mlxsw_reg_sbcm_pack(char *payload, u8 local_port, u8 pg_buff,
mlxsw_reg_sbcm_pool_set(payload, pool);
}
-/* SBPM - Shared Buffer Class Management Register
- * ----------------------------------------------
+/* SBPM - Shared Buffer Port Management Register
+ * ---------------------------------------------
* The SBPM register configures and retrieves the shared buffer allocation
* and configuration according to Port-Pool, including the definition
* of the associated quota.
@@ -3139,17 +4971,33 @@ MLXSW_ITEM32(reg, sbpm, local_port, 0x00, 16, 8);
*/
MLXSW_ITEM32(reg, sbpm, pool, 0x00, 8, 4);
-enum mlxsw_reg_sbpm_dir {
- MLXSW_REG_SBPM_DIR_INGRESS,
- MLXSW_REG_SBPM_DIR_EGRESS,
-};
-
/* reg_sbpm_dir
* Direction.
* Access: Index
*/
MLXSW_ITEM32(reg, sbpm, dir, 0x00, 0, 2);
+/* reg_sbpm_buff_occupancy
+ * Current buffer occupancy in cells.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, sbpm, buff_occupancy, 0x10, 0, 24);
+
+/* reg_sbpm_clr
+ * Clear Max Buffer Occupancy
+ * When this bit is set, the max_buff_occupancy field is cleared (and a
+ * new max value is tracked from the time the clear was performed).
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, sbpm, clr, 0x14, 31, 1);
+
+/* reg_sbpm_max_buff_occupancy
+ * Maximum value of buffer occupancy in cells monitored. Cleared by
+ * writing to the clr field.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, sbpm, max_buff_occupancy, 0x14, 0, 24);
+
/* reg_sbpm_min_buff
* Minimum buffer size for the limiter, in cells.
* Access: RW
@@ -3170,17 +5018,25 @@ MLXSW_ITEM32(reg, sbpm, min_buff, 0x18, 0, 24);
MLXSW_ITEM32(reg, sbpm, max_buff, 0x1C, 0, 24);
static inline void mlxsw_reg_sbpm_pack(char *payload, u8 local_port, u8 pool,
- enum mlxsw_reg_sbpm_dir dir,
+ enum mlxsw_reg_sbxx_dir dir, bool clr,
u32 min_buff, u32 max_buff)
{
MLXSW_REG_ZERO(sbpm, payload);
mlxsw_reg_sbpm_local_port_set(payload, local_port);
mlxsw_reg_sbpm_pool_set(payload, pool);
mlxsw_reg_sbpm_dir_set(payload, dir);
+ mlxsw_reg_sbpm_clr_set(payload, clr);
mlxsw_reg_sbpm_min_buff_set(payload, min_buff);
mlxsw_reg_sbpm_max_buff_set(payload, max_buff);
}
+static inline void mlxsw_reg_sbpm_unpack(char *payload, u32 *p_buff_occupancy,
+ u32 *p_max_buff_occupancy)
+{
+ *p_buff_occupancy = mlxsw_reg_sbpm_buff_occupancy_get(payload);
+ *p_max_buff_occupancy = mlxsw_reg_sbpm_max_buff_occupancy_get(payload);
+}
+
/* SBMM - Shared Buffer Multicast Management Register
* --------------------------------------------------
* The SBMM register configures and retrieves the shared buffer allocation
@@ -3236,6 +5092,143 @@ static inline void mlxsw_reg_sbmm_pack(char *payload, u8 prio, u32 min_buff,
mlxsw_reg_sbmm_pool_set(payload, pool);
}
+/* SBSR - Shared Buffer Status Register
+ * ------------------------------------
+ * The SBSR register retrieves the shared buffer occupancy according to
+ * Port-Pool. Note that this register enables reading a large amount of data.
+ * It is the user's responsibility to limit the amount of data to ensure the
+ * response fits within the maximum transfer unit. In case the response
+ * exceeds the maximum transfer unit, it is truncated with no special
+ * notice.
+ */
+#define MLXSW_REG_SBSR_ID 0xB005
+#define MLXSW_REG_SBSR_BASE_LEN 0x5C /* base length, without records */
+#define MLXSW_REG_SBSR_REC_LEN 0x8 /* record length */
+#define MLXSW_REG_SBSR_REC_MAX_COUNT 120
+#define MLXSW_REG_SBSR_LEN (MLXSW_REG_SBSR_BASE_LEN + \
+ MLXSW_REG_SBSR_REC_LEN * \
+ MLXSW_REG_SBSR_REC_MAX_COUNT)
+
+static const struct mlxsw_reg_info mlxsw_reg_sbsr = {
+ .id = MLXSW_REG_SBSR_ID,
+ .len = MLXSW_REG_SBSR_LEN,
+};
+
+/* reg_sbsr_clr
+ * Clear Max Buffer Occupancy. When this bit is set, the max_buff_occupancy
+ * field is cleared (and a new max value is tracked from the time the clear
+ * was performed).
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, sbsr, clr, 0x00, 31, 1);
+
+/* reg_sbsr_ingress_port_mask
+ * Bit vector for all ingress network ports.
+ * Indicates which of the ports (for which the relevant bit is set)
+ * are affected by the set operation. Configuration of any other port
+ * does not change.
+ * Access: Index
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sbsr, ingress_port_mask, 0x10, 0x20, 1);
+
+/* reg_sbsr_pg_buff_mask
+ * Bit vector for all switch priority groups.
+ * Indicates which of the priorities (for which the relevant bit is set)
+ * are affected by the set operation. Configuration of any other priority
+ * does not change.
+ * Range is 0..cap_max_pg_buffers - 1
+ * Access: Index
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sbsr, pg_buff_mask, 0x30, 0x4, 1);
+
+/* reg_sbsr_egress_port_mask
+ * Bit vector for all egress network ports.
+ * Indicates which of the ports (for which the relevant bit is set)
+ * are affected by the set operation. Configuration of any other port
+ * does not change.
+ * Access: Index
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sbsr, egress_port_mask, 0x34, 0x20, 1);
+
+/* reg_sbsr_tclass_mask
+ * Bit vector for all traffic classes.
+ * Indicates which of the traffic classes (for which the relevant bit is
+ * set) are affected by the set operation. Configuration of any other
+ * traffic class does not change.
+ * Range is 0..cap_max_tclass - 1
+ * Access: Index
+ */
+MLXSW_ITEM_BIT_ARRAY(reg, sbsr, tclass_mask, 0x54, 0x8, 1);
+
+static inline void mlxsw_reg_sbsr_pack(char *payload, bool clr)
+{
+ MLXSW_REG_ZERO(sbsr, payload);
+ mlxsw_reg_sbsr_clr_set(payload, clr);
+}
+
+/* reg_sbsr_rec_buff_occupancy
+ * Current buffer occupancy in cells.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sbsr, rec_buff_occupancy, MLXSW_REG_SBSR_BASE_LEN,
+ 0, 24, MLXSW_REG_SBSR_REC_LEN, 0x00, false);
+
+/* reg_sbsr_rec_max_buff_occupancy
+ * Maximum value of buffer occupancy in cells monitored. Cleared by
+ * writing to the clr field.
+ * Access: RO
+ */
+MLXSW_ITEM32_INDEXED(reg, sbsr, rec_max_buff_occupancy, MLXSW_REG_SBSR_BASE_LEN,
+ 0, 24, MLXSW_REG_SBSR_REC_LEN, 0x04, false);
+
+static inline void mlxsw_reg_sbsr_rec_unpack(char *payload, int rec_index,
+ u32 *p_buff_occupancy,
+ u32 *p_max_buff_occupancy)
+{
+ *p_buff_occupancy =
+ mlxsw_reg_sbsr_rec_buff_occupancy_get(payload, rec_index);
+ *p_max_buff_occupancy =
+ mlxsw_reg_sbsr_rec_max_buff_occupancy_get(payload, rec_index);
+}
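+
+/* A minimal sketch of an SBSR occupancy query, assuming the per-index
+ * *_set helpers generated by MLXSW_ITEM_BIT_ARRAY above. A real caller
+ * would request many {port, PG} pairs at once, bounding the number of
+ * records so the response fits in the transfer unit.
+ */
+static inline int
+mlxsw_reg_sbsr_query_example(struct mlxsw_core *core, u8 local_port,
+ bool clear, u32 *p_occ, u32 *p_max_occ)
+{
+ char sbsr_pl[MLXSW_REG_SBSR_LEN];
+ int err;
+
+ mlxsw_reg_sbsr_pack(sbsr_pl, clear);
+ /* Request a single ingress port and a single PG buffer; the
+ * response then carries a single ingress record.
+ */
+ mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
+ mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, 0, 1);
+ err = mlxsw_reg_query(core, MLXSW_REG(sbsr), sbsr_pl);
+ if (err)
+ return err;
+ mlxsw_reg_sbsr_rec_unpack(sbsr_pl, 0, p_occ, p_max_occ);
+ return 0;
+}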
+
+/* SBIB - Shared Buffer Internal Buffer Register
+ * ---------------------------------------------
+ * The SBIB register configures per port buffers for internal use. The internal
+ * buffers consume memory on the port buffers (note that the port buffers are
+ * used also by PBMC).
+ *
+ * For Spectrum this is used for egress mirroring.
+ */
+#define MLXSW_REG_SBIB_ID 0xB006
+#define MLXSW_REG_SBIB_LEN 0x10
+
+static const struct mlxsw_reg_info mlxsw_reg_sbib = {
+ .id = MLXSW_REG_SBIB_ID,
+ .len = MLXSW_REG_SBIB_LEN,
+};
+
+/* reg_sbib_local_port
+ * Local port number
+ * Not supported for CPU port and router port
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, sbib, local_port, 0x00, 16, 8);
+
+/* reg_sbib_buff_size
+ * Units represented in cells
+ * Allowed range is 0 to (cap_max_headroom_size - 1)
+ * Default is 0
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, sbib, buff_size, 0x08, 0, 24);
+
+static inline void mlxsw_reg_sbib_pack(char *payload, u8 local_port,
+ u32 buff_size)
+{
+ MLXSW_REG_ZERO(sbib, payload);
+ mlxsw_reg_sbib_local_port_set(payload, local_port);
+ mlxsw_reg_sbib_buff_size_set(payload, buff_size);
+}
+
static inline const char *mlxsw_reg_id_str(u16 reg_id)
{
switch (reg_id) {
@@ -3283,6 +5276,10 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
return "SFMR";
case MLXSW_REG_SPVMLR_ID:
return "SPVMLR";
+ case MLXSW_REG_QTCT_ID:
+ return "QTCT";
+ case MLXSW_REG_QEEC_ID:
+ return "QEEC";
case MLXSW_REG_PMLP_ID:
return "PMLP";
case MLXSW_REG_PMTU_ID:
@@ -3293,8 +5290,12 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
return "PPAD";
case MLXSW_REG_PAOS_ID:
return "PAOS";
+ case MLXSW_REG_PFCC_ID:
+ return "PFCC";
case MLXSW_REG_PPCNT_ID:
return "PPCNT";
+ case MLXSW_REG_PPTB_ID:
+ return "PPTB";
case MLXSW_REG_PBMC_ID:
return "PBMC";
case MLXSW_REG_PSPA_ID:
@@ -3303,6 +5304,26 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
return "HTGT";
case MLXSW_REG_HPKT_ID:
return "HPKT";
+ case MLXSW_REG_RGCR_ID:
+ return "RGCR";
+ case MLXSW_REG_RITR_ID:
+ return "RITR";
+ case MLXSW_REG_RATR_ID:
+ return "RATR";
+ case MLXSW_REG_RALTA_ID:
+ return "RALTA";
+ case MLXSW_REG_RALST_ID:
+ return "RALST";
+ case MLXSW_REG_RALTB_ID:
+ return "RALTB";
+ case MLXSW_REG_RALUE_ID:
+ return "RALUE";
+ case MLXSW_REG_RAUHT_ID:
+ return "RAUHT";
+ case MLXSW_REG_RALEU_ID:
+ return "RALEU";
+ case MLXSW_REG_RAUHTD_ID:
+ return "RAUHTD";
case MLXSW_REG_MFCR_ID:
return "MFCR";
case MLXSW_REG_MFSC_ID:
@@ -3311,6 +5332,10 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
return "MFSM";
case MLXSW_REG_MTCAP_ID:
return "MTCAP";
+ case MLXSW_REG_MPAT_ID:
+ return "MPAT";
+ case MLXSW_REG_MPAR_ID:
+ return "MPAR";
case MLXSW_REG_MTMP_ID:
return "MTMP";
case MLXSW_REG_MLCR_ID:
@@ -3323,6 +5348,10 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
return "SBPM";
case MLXSW_REG_SBMM_ID:
return "SBMM";
+ case MLXSW_REG_SBSR_ID:
+ return "SBSR";
+ case MLXSW_REG_SBIB_ID:
+ return "SBIB";
default:
return "*UNKNOWN*";
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 668b2f465ca5..d48873bcbddf 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -49,9 +49,14 @@
#include <linux/jiffies.h>
#include <linux/bitops.h>
#include <linux/list.h>
-#include <net/devlink.h>
+#include <linux/notifier.h>
+#include <linux/dcbnl.h>
+#include <linux/inetdevice.h>
#include <net/switchdev.h>
#include <generated/utsrelease.h>
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_mirred.h>
+#include <net/netevent.h>
#include "spectrum.h"
#include "core.h"
@@ -131,6 +136,8 @@ MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
*/
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
+static bool mlxsw_sp_port_dev_check(const struct net_device *dev);
+
static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
const struct mlxsw_tx_info *tx_info)
{
@@ -159,6 +166,303 @@ static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
return 0;
}
+static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_resources *resources;
+ int i;
+
+ resources = mlxsw_core_resources_get(mlxsw_sp->core);
+ if (!resources->max_span_valid)
+ return -EIO;
+
+ mlxsw_sp->span.entries_count = resources->max_span;
+ mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
+ sizeof(struct mlxsw_sp_span_entry),
+ GFP_KERNEL);
+ if (!mlxsw_sp->span.entries)
+ return -ENOMEM;
+
+ for (i = 0; i < mlxsw_sp->span.entries_count; i++)
+ INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);
+
+ return 0;
+}
+
+static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ int i;
+
+ for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+
+ WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
+ }
+ kfree(mlxsw_sp->span.entries);
+}
+
+static struct mlxsw_sp_span_entry *
+mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
+{
+ struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
+ struct mlxsw_sp_span_entry *span_entry;
+ char mpat_pl[MLXSW_REG_MPAT_LEN];
+ u8 local_port = port->local_port;
+ int index;
+ int i;
+ int err;
+
+ /* find a free entry to use */
+ index = -1;
+ for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+ if (!mlxsw_sp->span.entries[i].used) {
+ index = i;
+ span_entry = &mlxsw_sp->span.entries[i];
+ break;
+ }
+ }
+ if (index < 0)
+ return NULL;
+
+ /* create a new port analyzer entry for local_port */
+ mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
+ if (err)
+ return NULL;
+
+ span_entry->used = true;
+ span_entry->id = index;
+ /* The creator holds the initial reference. */
+ span_entry->ref_count = 1;
+ span_entry->local_port = local_port;
+ return span_entry;
+}
+
+static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_span_entry *span_entry)
+{
+ u8 local_port = span_entry->local_port;
+ char mpat_pl[MLXSW_REG_MPAT_LEN];
+ int pa_id = span_entry->id;
+
+ mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
+ span_entry->used = false;
+}
+
+struct mlxsw_sp_span_entry *mlxsw_sp_span_entry_find(struct mlxsw_sp_port *port)
+{
+ struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
+ int i;
+
+ for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+
+ if (curr->used && curr->local_port == port->local_port)
+ return curr;
+ }
+ return NULL;
+}
+
+struct mlxsw_sp_span_entry *mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
+{
+ struct mlxsw_sp_span_entry *span_entry;
+
+ span_entry = mlxsw_sp_span_entry_find(port);
+ if (span_entry) {
+ span_entry->ref_count++;
+ return span_entry;
+ }
+
+ return mlxsw_sp_span_entry_create(port);
+}
+
+static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_span_entry *span_entry)
+{
+ if (--span_entry->ref_count == 0)
+ mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
+ return 0;
+}
+
+static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
+{
+ struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
+ struct mlxsw_sp_span_inspected_port *p;
+ int i;
+
+ for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+
+ list_for_each_entry(p, &curr->bound_ports_list, list)
+ if (p->local_port == port->local_port &&
+ p->type == MLXSW_SP_SPAN_EGRESS)
+ return true;
+ }
+
+ return false;
+}
+
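+/* Egress-mirrored traffic is staged in an internal (SBIB) buffer; size it
+ * to 2.5 times the MTU in cells, plus one cell of slack, presumably so
+ * packets in flight towards the analyzer port can be absorbed.
+ */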
+static int mlxsw_sp_span_mtu_to_buffsize(int mtu)
+{
+ return MLXSW_SP_BYTES_TO_CELLS(mtu * 5 / 2) + 1;
+}
+
+static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
+{
+ struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
+ char sbib_pl[MLXSW_REG_SBIB_LEN];
+ int err;
+
+ /* If the port is egress mirrored, the shared buffer size should be
+ * updated according to the MTU value.
+ */
+ if (mlxsw_sp_span_is_egress_mirror(port)) {
+ mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
+ mlxsw_sp_span_mtu_to_buffsize(mtu));
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+ if (err) {
+ netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static struct mlxsw_sp_span_inspected_port *
+mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
+ struct mlxsw_sp_span_entry *span_entry)
+{
+ struct mlxsw_sp_span_inspected_port *p;
+
+ list_for_each_entry(p, &span_entry->bound_ports_list, list)
+ if (port->local_port == p->local_port)
+ return p;
+ return NULL;
+}
+
+static int
+mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
+ struct mlxsw_sp_span_entry *span_entry,
+ enum mlxsw_sp_span_type type)
+{
+ struct mlxsw_sp_span_inspected_port *inspected_port;
+ struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
+ char mpar_pl[MLXSW_REG_MPAR_LEN];
+ char sbib_pl[MLXSW_REG_SBIB_LEN];
+ int pa_id = span_entry->id;
+ int err;
+
+ /* if it is an egress SPAN, bind a shared buffer to it */
+ if (type == MLXSW_SP_SPAN_EGRESS) {
+ mlxsw_reg_sbib_pack(sbib_pl, port->local_port,
+ mlxsw_sp_span_mtu_to_buffsize(port->dev->mtu));
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+ if (err) {
+ netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
+ return err;
+ }
+ }
+
+ /* bind the port to the SPAN entry */
+ mlxsw_reg_mpar_pack(mpar_pl, port->local_port, type, true, pa_id);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
+ if (err)
+ goto err_mpar_reg_write;
+
+ inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
+ if (!inspected_port) {
+ err = -ENOMEM;
+ goto err_inspected_port_alloc;
+ }
+ inspected_port->local_port = port->local_port;
+ inspected_port->type = type;
+ list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);
+
+ return 0;
+
+err_mpar_reg_write:
+err_inspected_port_alloc:
+ if (type == MLXSW_SP_SPAN_EGRESS) {
+ mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+ }
+ return err;
+}
+
+static void
+mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
+ struct mlxsw_sp_span_entry *span_entry,
+ enum mlxsw_sp_span_type type)
+{
+ struct mlxsw_sp_span_inspected_port *inspected_port;
+ struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
+ char mpar_pl[MLXSW_REG_MPAR_LEN];
+ char sbib_pl[MLXSW_REG_SBIB_LEN];
+ int pa_id = span_entry->id;
+
+ inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
+ if (!inspected_port)
+ return;
+
+ /* remove the inspected port */
+ mlxsw_reg_mpar_pack(mpar_pl, port->local_port, type, false, pa_id);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
+
+ /* remove the SBIB buffer if it was egress SPAN */
+ if (type == MLXSW_SP_SPAN_EGRESS) {
+ mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+ }
+
+ mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
+
+ list_del(&inspected_port->list);
+ kfree(inspected_port);
+}
+
+static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
+ struct mlxsw_sp_port *to,
+ enum mlxsw_sp_span_type type)
+{
+ struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
+ struct mlxsw_sp_span_entry *span_entry;
+ int err;
+
+ span_entry = mlxsw_sp_span_entry_get(to);
+ if (!span_entry)
+ return -ENOENT;
+
+ netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
+ span_entry->id);
+
+ err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
+ if (err)
+ goto err_port_bind;
+
+ return 0;
+
+err_port_bind:
+ mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
+ return err;
+}
+
+static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
+ struct mlxsw_sp_port *to,
+ enum mlxsw_sp_span_type type)
+{
+ struct mlxsw_sp_span_entry *span_entry;
+
+ span_entry = mlxsw_sp_span_entry_find(to);
+ if (!span_entry) {
+ netdev_err(from->dev, "no span entry found\n");
+ return;
+ }
+
+ netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
+ span_entry->id);
+ mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
+}
+
static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool is_up)
{
@@ -171,23 +475,6 @@ static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
}
-static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
- bool *p_is_up)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char paos_pl[MLXSW_REG_PAOS_LEN];
- u8 oper_status;
- int err;
-
- mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port, 0);
- err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
- if (err)
- return err;
- oper_status = mlxsw_reg_paos_oper_status_get(paos_pl);
- *p_is_up = oper_status == MLXSW_PORT_ADMIN_STATUS_UP ? true : false;
- return 0;
-}
-
static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
unsigned char *addr)
{
@@ -209,23 +496,6 @@ static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
}
-static int mlxsw_sp_port_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid, enum mlxsw_reg_spms_state state)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char *spms_pl;
- int err;
-
- spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
- if (!spms_pl)
- return -ENOMEM;
- mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
- mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
- kfree(spms_pl);
- return err;
-}
-
static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
@@ -247,15 +517,23 @@ static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
-static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
+static int __mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ u8 swid)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pspa_pl[MLXSW_REG_PSPA_LEN];
- mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
+ mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}
+static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+
+ return __mlxsw_sp_port_swid_set(mlxsw_sp, mlxsw_sp_port->local_port,
+ swid);
+}
+
static int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool enable)
{
@@ -307,7 +585,7 @@ mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
u8 local_port, u8 *p_module,
- u8 *p_width)
+ u8 *p_width, u8 *p_lane)
{
char pmlp_pl[MLXSW_REG_PMLP_LEN];
int err;
@@ -318,6 +596,7 @@ static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
return err;
*p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
*p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
+ *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
return 0;
}
@@ -379,7 +658,7 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
u64 len;
int err;
- if (mlxsw_core_skb_transmit_busy(mlxsw_sp, &tx_info))
+ if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
return NETDEV_TX_BUSY;
if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
@@ -399,11 +678,15 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
}
mlxsw_sp_txhdr_construct(skb, &tx_info);
- len = skb->len;
+ /* The TX header is consumed by the HW on its way out, so its bytes
+ * should not be counted as transmitted.
+ */
+ len = skb->len - MLXSW_TXHDR_LEN;
+
/* Due to a race we might fail here because of a full queue. In that
* unlikely case we simply drop the packet.
*/
- err = mlxsw_core_skb_transmit(mlxsw_sp, skb, &tx_info);
+ err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);
if (!err) {
pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
@@ -438,16 +721,94 @@ static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
return 0;
}
+static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int pg_index, int mtu,
+ bool pause_en, bool pfc_en, u16 delay)
+{
+ u16 pg_size = 2 * MLXSW_SP_BYTES_TO_CELLS(mtu);
+
+ delay = pfc_en ? mlxsw_sp_pfc_delay_get(mtu, delay) :
+ MLXSW_SP_PAUSE_DELAY;
+
+ if (pause_en || pfc_en)
+ mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, pg_index,
+ pg_size + delay, pg_size);
+ else
+ mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg_index, pg_size);
+}
+
+int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
+ u8 *prio_tc, bool pause_en,
+ struct ieee_pfc *my_pfc)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 pfc_en = my_pfc ? my_pfc->pfc_en : 0;
+ u16 delay = my_pfc ? my_pfc->delay : 0;
+ char pbmc_pl[MLXSW_REG_PBMC_LEN];
+ int i, j, err;
+
+ mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
+ if (err)
+ return err;
+
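+ /* A PG buffer is configured only if at least one switch priority
+ * is mapped to it; the PFC bit of the first such priority decides
+ * whether the buffer is lossless.
+ */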
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ bool configure = false;
+ bool pfc = false;
+
+ for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
+ if (prio_tc[j] == i) {
+ pfc = pfc_en & BIT(j);
+ configure = true;
+ break;
+ }
+ }
+
+ if (!configure)
+ continue;
+ mlxsw_sp_pg_buf_pack(pbmc_pl, i, mtu, pause_en, pfc, delay);
+ }
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
+}
+
+static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ int mtu, bool pause_en)
+{
+ u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
+ bool dcb_en = !!mlxsw_sp_port->dcb.ets;
+ struct ieee_pfc *my_pfc;
+ u8 *prio_tc;
+
+ prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
+ my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;
+
+ return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
+ pause_en, my_pfc);
+}
+
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
int err;
- err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
+ err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
if (err)
return err;
+ err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
+ if (err)
+ goto err_span_port_mtu_update;
+ err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
+ if (err)
+ goto err_port_mtu_set;
dev->mtu = mtu;
return 0;
+
+err_port_mtu_set:
+ mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
+err_span_port_mtu_update:
+ mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
+ return err;
}
static struct rtnl_link_stats64 *
@@ -550,94 +911,8 @@ static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
return 0;
}
-static struct mlxsw_sp_vfid *
-mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid)
-{
- struct mlxsw_sp_vfid *vfid;
-
- list_for_each_entry(vfid, &mlxsw_sp->port_vfids.list, list) {
- if (vfid->vid == vid)
- return vfid;
- }
-
- return NULL;
-}
-
-static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
-{
- return find_first_zero_bit(mlxsw_sp->port_vfids.mapped,
- MLXSW_SP_VFID_PORT_MAX);
-}
-
-static int __mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid)
-{
- u16 fid = mlxsw_sp_vfid_to_fid(vfid);
- char sfmr_pl[MLXSW_REG_SFMR_LEN];
-
- mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, 0);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
-}
-
-static void __mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid)
-{
- u16 fid = mlxsw_sp_vfid_to_fid(vfid);
- char sfmr_pl[MLXSW_REG_SFMR_LEN];
-
- mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, fid, 0);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
-}
-
-static struct mlxsw_sp_vfid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
- u16 vid)
-{
- struct device *dev = mlxsw_sp->bus_info->dev;
- struct mlxsw_sp_vfid *vfid;
- u16 n_vfid;
- int err;
-
- n_vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
- if (n_vfid == MLXSW_SP_VFID_PORT_MAX) {
- dev_err(dev, "No available vFIDs\n");
- return ERR_PTR(-ERANGE);
- }
-
- err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
- if (err) {
- dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
- return ERR_PTR(err);
- }
-
- vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
- if (!vfid)
- goto err_allocate_vfid;
-
- vfid->vfid = n_vfid;
- vfid->vid = vid;
-
- list_add(&vfid->list, &mlxsw_sp->port_vfids.list);
- set_bit(n_vfid, mlxsw_sp->port_vfids.mapped);
-
- return vfid;
-
-err_allocate_vfid:
- __mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
- return ERR_PTR(-ENOMEM);
-}
-
-static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_vfid *vfid)
-{
- clear_bit(vfid->vfid, mlxsw_sp->port_vfids.mapped);
- list_del(&vfid->list);
-
- __mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);
-
- kfree(vfid);
-}
-
static struct mlxsw_sp_port *
-mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_vfid *vfid)
+mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
{
struct mlxsw_sp_port *mlxsw_sp_vport;
@@ -655,8 +930,7 @@ mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
- mlxsw_sp_vport->vport.vfid = vfid;
- mlxsw_sp_vport->vport.vid = vfid->vid;
+ mlxsw_sp_vport->vport.vid = vid;
list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);
@@ -669,13 +943,12 @@ static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
kfree(mlxsw_sp_vport);
}
-int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
- u16 vid)
+static int mlxsw_sp_port_add_vid(struct net_device *dev,
+ __be16 __always_unused proto, u16 vid)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_port *mlxsw_sp_vport;
- struct mlxsw_sp_vfid *vfid;
+ bool untagged = vid == 1;
int err;
/* VLAN 0 is added to HW filter when device goes up, but it is
@@ -684,37 +957,12 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
if (!vid)
return 0;
- if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
- netdev_warn(dev, "VID=%d already configured\n", vid);
+ if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid))
return 0;
- }
-
- vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
- if (!vfid) {
- vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
- if (IS_ERR(vfid)) {
- netdev_err(dev, "Failed to create vFID for VID=%d\n",
- vid);
- return PTR_ERR(vfid);
- }
- }
- mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vfid);
- if (!mlxsw_sp_vport) {
- netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
- err = -ENOMEM;
- goto err_port_vport_create;
- }
-
- if (!vfid->nr_vports) {
- err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid,
- true, false);
- if (err) {
- netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
- vfid->vfid);
- goto err_vport_flood_set;
- }
- }
+ mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
+ if (!mlxsw_sp_vport)
+ return -ENOMEM;
/* When adding the first VLAN interface on a bridged port we need to
* transition all the active 802.1Q bridge VLANs to use explicit
@@ -722,77 +970,36 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
*/
if (list_is_singular(&mlxsw_sp_port->vports_list)) {
err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
- if (err) {
- netdev_err(dev, "Failed to set to Virtual mode\n");
+ if (err)
goto err_port_vp_mode_trans;
- }
- }
-
- err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
- MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
- true,
- mlxsw_sp_vfid_to_fid(vfid->vfid),
- vid);
- if (err) {
- netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n",
- vid, vfid->vfid);
- goto err_port_vid_to_fid_set;
}
err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
- if (err) {
- netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
+ if (err)
goto err_port_vid_learning_set;
- }
- err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, false);
- if (err) {
- netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
- vid);
+ err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
+ if (err)
goto err_port_add_vid;
- }
-
- err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
- MLXSW_REG_SPMS_STATE_FORWARDING);
- if (err) {
- netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
- goto err_port_stp_state_set;
- }
-
- vfid->nr_vports++;
return 0;
-err_port_stp_state_set:
- mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
err_port_add_vid:
mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
err_port_vid_learning_set:
- mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
- MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
- mlxsw_sp_vfid_to_fid(vfid->vfid), vid);
-err_port_vid_to_fid_set:
if (list_is_singular(&mlxsw_sp_port->vports_list))
mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
err_port_vp_mode_trans:
- if (!vfid->nr_vports)
- mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
- false);
-err_vport_flood_set:
mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
-err_port_vport_create:
- if (!vfid->nr_vports)
- mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
return err;
}
-int mlxsw_sp_port_kill_vid(struct net_device *dev,
- __be16 __always_unused proto, u16 vid)
+static int mlxsw_sp_port_kill_vid(struct net_device *dev,
+ __be16 __always_unused proto, u16 vid)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp_port *mlxsw_sp_vport;
- struct mlxsw_sp_vfid *vfid;
- int err;
+ struct mlxsw_sp_fid *f;
/* VLAN 0 is removed from HW filter when device goes down, but
* it is reserved in our case, so simply return.
@@ -801,82 +1008,218 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev,
return 0;
mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
- if (!mlxsw_sp_vport) {
- netdev_warn(dev, "VID=%d does not exist\n", vid);
+ if (WARN_ON(!mlxsw_sp_vport))
return 0;
- }
- vfid = mlxsw_sp_vport->vport.vfid;
+ mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
- err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
- MLXSW_REG_SPMS_STATE_DISCARDING);
- if (err) {
- netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
- return err;
- }
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
- err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
- if (err) {
- netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
- vid);
- return err;
+ /* Drop FID reference. If this was the last reference the
+ * resources will be freed.
+ */
+ f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
+ if (f && !WARN_ON(!f->leave))
+ f->leave(mlxsw_sp_vport);
+
+ /* When removing the last VLAN interface on a bridged port we need to
+ * transition all active 802.1Q bridge VLANs to use VID to FID
+ * mappings and set port's mode to VLAN mode.
+ */
+ if (list_is_singular(&mlxsw_sp_port->vports_list))
+ mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
+
+ mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
+
+ return 0;
+}
+
+static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
+ size_t len)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ u8 module = mlxsw_sp_port->mapping.module;
+ u8 width = mlxsw_sp_port->mapping.width;
+ u8 lane = mlxsw_sp_port->mapping.lane;
+ int err;
+
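+ /* e.g. front panel port 3 is "p3"; after a split by four, its
+ * second quarter-width port is "p3s1" (lane / width enumerates the
+ * split index).
+ */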
+ if (!mlxsw_sp_port->split)
+ err = snprintf(name, len, "p%d", module + 1);
+ else
+ err = snprintf(name, len, "p%ds%d", module + 1,
+ lane / width);
+
+ if (err >= len)
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct mlxsw_sp_port_mall_tc_entry *
+mlxsw_sp_port_mirror_entry_find(struct mlxsw_sp_port *port,
+ unsigned long cookie)
+{
+ struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
+
+ list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
+ if (mall_tc_entry->cookie == cookie)
+ return mall_tc_entry;
+
+ return NULL;
+}
+
+static int
+mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_cls_matchall_offload *cls,
+ const struct tc_action *a,
+ bool ingress)
+{
+ struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
+ struct net *net = dev_net(mlxsw_sp_port->dev);
+ enum mlxsw_sp_span_type span_type;
+ struct mlxsw_sp_port *to_port;
+ struct net_device *to_dev;
+ int ifindex;
+ int err;
+
+ ifindex = tcf_mirred_ifindex(a);
+ to_dev = __dev_get_by_index(net, ifindex);
+ if (!to_dev) {
+ netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
+ return -EINVAL;
}
- err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
- if (err) {
- netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
- return err;
+ if (!mlxsw_sp_port_dev_check(to_dev)) {
+ netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port");
+ return -EOPNOTSUPP;
}
+ to_port = netdev_priv(to_dev);
- err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
- MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
- false,
- mlxsw_sp_vfid_to_fid(vfid->vfid),
- vid);
- if (err) {
- netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n",
- vid, vfid->vfid);
- return err;
+ mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
+ if (!mall_tc_entry)
+ return -ENOMEM;
+
+ mall_tc_entry->cookie = cls->cookie;
+ mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
+ mall_tc_entry->mirror.to_local_port = to_port->local_port;
+ mall_tc_entry->mirror.ingress = ingress;
+ list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
+
+ span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
+ err = mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
+ if (err)
+ goto err_mirror_add;
+ return 0;
+
+err_mirror_add:
+ list_del(&mall_tc_entry->list);
+ kfree(mall_tc_entry);
+ return err;
+}
+
+static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
+ __be16 protocol,
+ struct tc_cls_matchall_offload *cls,
+ bool ingress)
+{
+ const struct tc_action *a;
+ LIST_HEAD(actions);
+ int err;
+
+ if (!tc_single_action(cls->exts)) {
+ netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
+ return -EOPNOTSUPP;
}
- /* When removing the last VLAN interface on a bridged port we need to
- * transition all active 802.1Q bridge VLANs to use VID to FID
- * mappings and set port's mode to VLAN mode.
- */
- if (list_is_singular(&mlxsw_sp_port->vports_list)) {
- err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
- if (err) {
- netdev_err(dev, "Failed to set to VLAN mode\n");
+ tcf_exts_to_list(cls->exts, &actions);
+ list_for_each_entry(a, &actions, list) {
+ if (!is_tcf_mirred_mirror(a) || protocol != htons(ETH_P_ALL))
+ return -EOPNOTSUPP;
+
+ err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port, cls,
+ a, ingress);
+ if (err)
return err;
- }
}
- vfid->nr_vports--;
- mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
+ return 0;
+}
- /* Destroy the vFID if no vPorts are assigned to it anymore. */
- if (!vfid->nr_vports)
- mlxsw_sp_vfid_destroy(mlxsw_sp_port->mlxsw_sp, vfid);
+static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
+ enum mlxsw_sp_span_type span_type;
+ struct mlxsw_sp_port *to_port;
+
+ mall_tc_entry = mlxsw_sp_port_mirror_entry_find(mlxsw_sp_port,
+ cls->cookie);
+ if (!mall_tc_entry) {
+ netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
+ return;
+ }
- return 0;
+ switch (mall_tc_entry->type) {
+ case MLXSW_SP_PORT_MALL_MIRROR:
+ to_port = mlxsw_sp->ports[mall_tc_entry->mirror.to_local_port];
+ span_type = mall_tc_entry->mirror.ingress ?
+ MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
+
+ mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type);
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ list_del(&mall_tc_entry->list);
+ kfree(mall_tc_entry);
+}
+
+static int mlxsw_sp_setup_tc(struct net_device *dev, u32 handle,
+ __be16 proto, struct tc_to_netdev *tc)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS);
+
+ if (tc->type == TC_SETUP_MATCHALL) {
+ switch (tc->cls_mall->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port,
+ proto,
+ tc->cls_mall,
+ ingress);
+ case TC_CLSMATCHALL_DESTROY:
+ mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port,
+ tc->cls_mall);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return -EOPNOTSUPP;
}
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
.ndo_open = mlxsw_sp_port_open,
.ndo_stop = mlxsw_sp_port_stop,
.ndo_start_xmit = mlxsw_sp_port_xmit,
+ .ndo_setup_tc = mlxsw_sp_setup_tc,
.ndo_set_rx_mode = mlxsw_sp_set_rx_mode,
.ndo_set_mac_address = mlxsw_sp_port_set_mac_address,
.ndo_change_mtu = mlxsw_sp_port_change_mtu,
.ndo_get_stats64 = mlxsw_sp_port_get_stats64,
.ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
.ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
+ .ndo_neigh_construct = mlxsw_sp_router_neigh_construct,
+ .ndo_neigh_destroy = mlxsw_sp_router_neigh_destroy,
.ndo_fdb_add = switchdev_port_fdb_add,
.ndo_fdb_del = switchdev_port_fdb_del,
.ndo_fdb_dump = switchdev_port_fdb_dump,
.ndo_bridge_setlink = switchdev_port_bridge_setlink,
.ndo_bridge_getlink = switchdev_port_bridge_getlink,
.ndo_bridge_dellink = switchdev_port_bridge_dellink,
+ .ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name,
};
static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
@@ -897,12 +1240,74 @@ static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
sizeof(drvinfo->bus_info));
}
+static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+ pause->rx_pause = mlxsw_sp_port->link.rx_pause;
+ pause->tx_pause = mlxsw_sp_port->link.tx_pause;
+}
+
+static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct ethtool_pauseparam *pause)
+{
+ char pfcc_pl[MLXSW_REG_PFCC_LEN];
+
+ mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
+ mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
+ mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);
+
+ return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
+ pfcc_pl);
+}
+
+static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ bool pause_en = pause->tx_pause || pause->rx_pause;
+ int err;
+
+ if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
+ netdev_err(dev, "PFC already enabled on port\n");
+ return -EINVAL;
+ }
+
+ if (pause->autoneg) {
+ netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
+ return -EINVAL;
+ }
+
+ err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
+ if (err) {
+ netdev_err(dev, "Failed to configure port's headroom\n");
+ return err;
+ }
+
+ err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
+ if (err) {
+ netdev_err(dev, "Failed to set PAUSE parameters\n");
+ goto err_port_pause_configure;
+ }
+
+ mlxsw_sp_port->link.rx_pause = pause->rx_pause;
+ mlxsw_sp_port->link.tx_pause = pause->tx_pause;
+
+ return 0;
+
+err_port_pause_configure:
+ pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
+ mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
+ return err;
+}
+
struct mlxsw_sp_port_hw_stats {
char str[ETH_GSTRING_LEN];
u64 (*getter)(char *payload);
};
-static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
{
.str = "a_frames_transmitted_ok",
.getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
@@ -983,6 +1388,90 @@ static const struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
#define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
+ {
+ .str = "rx_octets_prio",
+ .getter = mlxsw_reg_ppcnt_rx_octets_get,
+ },
+ {
+ .str = "rx_frames_prio",
+ .getter = mlxsw_reg_ppcnt_rx_frames_get,
+ },
+ {
+ .str = "tx_octets_prio",
+ .getter = mlxsw_reg_ppcnt_tx_octets_get,
+ },
+ {
+ .str = "tx_frames_prio",
+ .getter = mlxsw_reg_ppcnt_tx_frames_get,
+ },
+ {
+ .str = "rx_pause_prio",
+ .getter = mlxsw_reg_ppcnt_rx_pause_get,
+ },
+ {
+ .str = "rx_pause_duration_prio",
+ .getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
+ },
+ {
+ .str = "tx_pause_prio",
+ .getter = mlxsw_reg_ppcnt_tx_pause_get,
+ },
+ {
+ .str = "tx_pause_duration_prio",
+ .getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
+ },
+};
+
+#define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
+
+static u64 mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get(char *ppcnt_pl)
+{
+ u64 transmit_queue = mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
+
+ return MLXSW_SP_CELLS_TO_BYTES(transmit_queue);
+}
+
+static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
+ {
+ .str = "tc_transmit_queue_tc",
+ .getter = mlxsw_reg_ppcnt_tc_transmit_queue_bytes_get,
+ },
+ {
+ .str = "tc_no_buffer_discard_uc_tc",
+ .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
+ },
+};
+
+#define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
+
+#define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
+ (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
+ MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
+ IEEE_8021QAZ_MAX_TCS)
+
+static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
+ snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
+ mlxsw_sp_port_hw_prio_stats[i].str, prio);
+ *p += ETH_GSTRING_LEN;
+ }
+}
+
+static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
+ snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
+ mlxsw_sp_port_hw_tc_stats[i].str, tc);
+ *p += ETH_GSTRING_LEN;
+ }
+}
+
static void mlxsw_sp_port_get_strings(struct net_device *dev,
u32 stringset, u8 *data)
{
@@ -996,6 +1485,13 @@ static void mlxsw_sp_port_get_strings(struct net_device *dev,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ mlxsw_sp_port_get_prio_strings(&p, i);
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ mlxsw_sp_port_get_tc_strings(&p, i);
+
break;
}
}
@@ -1023,26 +1519,80 @@ static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
}
-static void mlxsw_sp_port_get_stats(struct net_device *dev,
- struct ethtool_stats *stats, u64 *data)
+static int
+mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
+ int *p_len, enum mlxsw_reg_ppcnt_grp grp)
+{
+ switch (grp) {
+ case MLXSW_REG_PPCNT_IEEE_8023_CNT:
+ *p_hw_stats = mlxsw_sp_port_hw_stats;
+ *p_len = MLXSW_SP_PORT_HW_STATS_LEN;
+ break;
+ case MLXSW_REG_PPCNT_PRIO_CNT:
+ *p_hw_stats = mlxsw_sp_port_hw_prio_stats;
+ *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
+ break;
+ case MLXSW_REG_PPCNT_TC_CNT:
+ *p_hw_stats = mlxsw_sp_port_hw_tc_stats;
+ *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
+ break;
+ default:
+ WARN_ON(1);
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static void __mlxsw_sp_port_get_stats(struct net_device *dev,
+ enum mlxsw_reg_ppcnt_grp grp, int prio,
+ u64 *data, int data_index)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_port_hw_stats *hw_stats;
char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
- int i;
+ int i, len;
int err;
- mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port);
+ err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
+ if (err)
+ return;
+ mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
- for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++)
- data[i] = !err ? mlxsw_sp_port_hw_stats[i].getter(ppcnt_pl) : 0;
+ for (i = 0; i < len; i++)
+ data[data_index + i] = !err ? hw_stats[i].getter(ppcnt_pl) : 0;
+}
+
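+/* The value layout produced here must match the string order emitted by
+ * mlxsw_sp_port_get_strings(): the IEEE 802.3 counters first, then the
+ * per-priority and per-TC groups, each iterated IEEE_8021QAZ_MAX_TCS times.
+ */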
+static void mlxsw_sp_port_get_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ int i, data_index = 0;
+
+ /* IEEE 802.3 Counters */
+ __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
+ data, data_index);
+ data_index = MLXSW_SP_PORT_HW_STATS_LEN;
+
+ /* Per-Priority Counters */
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
+ data, data_index);
+ data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
+ }
+
+ /* Per-TC Counters */
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
+ data, data_index);
+ data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
+ }
}
static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
- return MLXSW_SP_PORT_HW_STATS_LEN;
+ return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
default:
return -EOPNOTSUPP;
}
@@ -1263,7 +1813,8 @@ static int mlxsw_sp_port_get_settings(struct net_device *dev,
cmd->supported = mlxsw_sp_from_ptys_supported_port(eth_proto_cap) |
mlxsw_sp_from_ptys_supported_link(eth_proto_cap) |
- SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause |
+ SUPPORTED_Autoneg;
cmd->advertising = mlxsw_sp_from_ptys_advert_link(eth_proto_admin);
mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev),
eth_proto_oper, cmd);
@@ -1322,7 +1873,6 @@ static int mlxsw_sp_port_set_settings(struct net_device *dev,
u32 eth_proto_new;
u32 eth_proto_cap;
u32 eth_proto_admin;
- bool is_up;
int err;
speed = ethtool_cmd_speed(cmd);
@@ -1354,12 +1904,7 @@ static int mlxsw_sp_port_set_settings(struct net_device *dev,
return err;
}
- err = mlxsw_sp_port_oper_status_get(mlxsw_sp_port, &is_up);
- if (err) {
- netdev_err(dev, "Failed to get oper status");
- return err;
- }
- if (!is_up)
+ if (!netif_running(dev))
return 0;
err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
@@ -1380,6 +1925,8 @@ static int mlxsw_sp_port_set_settings(struct net_device *dev,
static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
.get_drvinfo = mlxsw_sp_port_get_drvinfo,
.get_link = ethtool_op_get_link,
+ .get_pauseparam = mlxsw_sp_port_get_pauseparam,
+ .set_pauseparam = mlxsw_sp_port_set_pauseparam,
.get_strings = mlxsw_sp_port_get_strings,
.set_phys_id = mlxsw_sp_port_set_phys_id,
.get_ethtool_stats = mlxsw_sp_port_get_stats,
@@ -1402,12 +1949,124 @@ mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
}
-static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- bool split, u8 module, u8 width)
+int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
+ bool dwrr, u8 dwrr_weight)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qeec_pl[MLXSW_REG_QEEC_LEN];
+
+ mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
+ next_index);
+ mlxsw_reg_qeec_de_set(qeec_pl, true);
+ mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
+ mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
+}
+
+int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ enum mlxsw_reg_qeec_hr hr, u8 index,
+ u8 next_index, u32 maxrate)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qeec_pl[MLXSW_REG_QEEC_LEN];
+
+ mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
+ next_index);
+ mlxsw_reg_qeec_mase_set(qeec_pl, true);
+ mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
+}
+
+int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u8 switch_prio, u8 tclass)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char qtct_pl[MLXSW_REG_QTCT_LEN];
+
+ mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
+ tclass);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
+}
+
+static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ int err, i;
+
+ /* Set up the elements hierarchy, so that each TC is linked to
+ * one subgroup, and all subgroups are members of the same group.
+ */
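+ /* Resulting hierarchy per port:
+ *
+ *            port
+ *              |
+ *           group 0
+ *          /        \
+ *   subgroup 0 ... subgroup 7
+ *       |               |
+ *      TC 0    ...     TC 7
+ */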
+ err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
+ 0);
+ if (err)
+ return err;
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
+ 0, false, 0);
+ if (err)
+ return err;
+ }
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HIERARCY_TC, i, i,
+ false, 0);
+ if (err)
+ return err;
+ }
+
+ /* Make sure the max shaper is disabled in all hierarchies that
+ * support it.
+ */
+ err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
+ MLXSW_REG_QEEC_MAS_DIS);
+ if (err)
+ return err;
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
+ i, 0,
+ MLXSW_REG_QEEC_MAS_DIS);
+ if (err)
+ return err;
+ }
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HIERARCY_TC,
+ i, i,
+ MLXSW_REG_QEEC_MAS_DIS);
+ if (err)
+ return err;
+ }
+
+ /* Map all priorities to traffic class 0. */
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_port_pvid_vport_create(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_port->pvid = 1;
+
+ return mlxsw_sp_port_add_vid(mlxsw_sp_port->dev, 0, 1);
+}
+
+static int mlxsw_sp_port_pvid_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ return mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
+}
+
+static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ bool split, u8 module, u8 width, u8 lane)
{
- struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
struct mlxsw_sp_port *mlxsw_sp_port;
- struct devlink_port *devlink_port;
struct net_device *dev;
size_t bytes;
int err;
@@ -1420,6 +2079,9 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
mlxsw_sp_port->local_port = local_port;
mlxsw_sp_port->split = split;
+ mlxsw_sp_port->mapping.module = module;
+ mlxsw_sp_port->mapping.width = width;
+ mlxsw_sp_port->mapping.lane = lane;
bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
if (!mlxsw_sp_port->active_vlans) {
@@ -1432,6 +2094,7 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_port_untagged_vlans_alloc;
}
INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);
+ INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
mlxsw_sp_port->pcpu_stats =
netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
@@ -1443,6 +2106,13 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
+ err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_swid_set;
+ }
+
err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
@@ -1453,23 +2123,14 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
netif_carrier_off(dev);
dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
- NETIF_F_HW_VLAN_CTAG_FILTER;
+ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
+ dev->hw_features |= NETIF_F_HW_TC;
/* Each packet needs to have a Tx header (metadata) on top all other
* headers.
*/
dev->hard_header_len += MLXSW_TXHDR_LEN;
- devlink_port = &mlxsw_sp_port->devlink_port;
- if (mlxsw_sp_port->split)
- devlink_port_split_set(devlink_port, module);
- err = devlink_port_register(devlink, devlink_port, local_port);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register devlink port\n",
- mlxsw_sp_port->local_port);
- goto err_devlink_port_register;
- }
-
err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
@@ -1477,13 +2138,6 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_port_system_port_mapping_set;
}
- err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
- mlxsw_sp_port->local_port);
- goto err_port_swid_set;
- }
-
err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
@@ -1509,7 +2163,30 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_port_buffers_init;
}
+ err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_ets_init;
+ }
+
+ /* ETS and buffers must be initialized before DCB. */
+ err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_dcb_init;
+ }
+
+ err = mlxsw_sp_port_pvid_vport_create(mlxsw_sp_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create PVID vPort\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_pvid_vport_create;
+ }
+
mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
+ mlxsw_sp->ports[local_port] = mlxsw_sp_port;
err = register_netdev(dev);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
@@ -1517,27 +2194,35 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_register_netdev;
}
- devlink_port_type_eth_set(devlink_port, dev);
-
- err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
- if (err)
- goto err_port_vlan_init;
+ err = mlxsw_core_port_init(mlxsw_sp->core, &mlxsw_sp_port->core_port,
+ mlxsw_sp_port->local_port, dev,
+ mlxsw_sp_port->split, module);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
+ mlxsw_sp_port->local_port);
+ goto err_core_port_init;
+ }
- mlxsw_sp->ports[local_port] = mlxsw_sp_port;
return 0;
-err_port_vlan_init:
+err_core_port_init:
unregister_netdev(dev);
err_register_netdev:
+ mlxsw_sp->ports[local_port] = NULL;
+ mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
+ mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
+err_port_pvid_vport_create:
+ mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
+err_port_dcb_init:
+err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
-err_port_swid_set:
err_port_system_port_mapping_set:
- devlink_port_unregister(&mlxsw_sp_port->devlink_port);
-err_devlink_port_register:
err_dev_addr_init:
+ mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
+err_port_swid_set:
free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
kfree(mlxsw_sp_port->untagged_vlans);
@@ -1548,64 +2233,24 @@ err_port_active_vlans_alloc:
return err;
}
-static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- bool split, u8 module, u8 width, u8 lane)
-{
- int err;
-
- err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
- lane);
- if (err)
- return err;
-
- err = __mlxsw_sp_port_create(mlxsw_sp, local_port, split, module,
- width);
- if (err)
- goto err_port_create;
-
- return 0;
-
-err_port_create:
- mlxsw_sp_port_module_unmap(mlxsw_sp, local_port);
- return err;
-}
-
-static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
-{
- struct net_device *dev = mlxsw_sp_port->dev;
- struct mlxsw_sp_port *mlxsw_sp_vport, *tmp;
-
- list_for_each_entry_safe(mlxsw_sp_vport, tmp,
- &mlxsw_sp_port->vports_list, vport.list) {
- u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
-
- /* vPorts created for VLAN devices should already be gone
- * by now, since we unregistered the port netdev.
- */
- WARN_ON(is_vlan_dev(mlxsw_sp_vport->dev));
- mlxsw_sp_port_kill_vid(dev, 0, vid);
- }
-}
-
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
- struct devlink_port *devlink_port;
if (!mlxsw_sp_port)
return;
- mlxsw_sp->ports[local_port] = NULL;
- devlink_port = &mlxsw_sp_port->devlink_port;
- devlink_port_type_clear(devlink_port);
+ mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
- devlink_port_unregister(devlink_port);
- mlxsw_sp_port_vports_fini(mlxsw_sp_port);
+ mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
+ mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
+ mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
free_percpu(mlxsw_sp_port->pcpu_stats);
kfree(mlxsw_sp_port->untagged_vlans);
kfree(mlxsw_sp_port->active_vlans);
+ WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vports_list));
free_netdev(mlxsw_sp_port->dev);
}
@@ -1620,8 +2265,8 @@ static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
+ u8 module, width, lane;
size_t alloc_size;
- u8 module, width;
int i;
int err;
@@ -1632,13 +2277,14 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
for (i = 1; i < MLXSW_PORT_MAX_PORTS; i++) {
err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
- &width);
+ &width, &lane);
if (err)
goto err_port_module_info_get;
if (!width)
continue;
mlxsw_sp->port_to_module[i] = module;
- err = __mlxsw_sp_port_create(mlxsw_sp, i, false, module, width);
+ err = mlxsw_sp_port_create(mlxsw_sp, i, false, module, width,
+ lane);
if (err)
goto err_port_create;
}
@@ -1659,11 +2305,85 @@ static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
return local_port - offset;
}
-static int mlxsw_sp_port_split(void *priv, u8 local_port, unsigned int count)
+static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
+ u8 module, unsigned int count)
{
- struct mlxsw_sp *mlxsw_sp = priv;
- struct mlxsw_sp_port *mlxsw_sp_port;
u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
+ int err, i;
+
+ for (i = 0; i < count; i++) {
+ err = mlxsw_sp_port_module_map(mlxsw_sp, base_port + i, module,
+ width, i * width);
+ if (err)
+ goto err_port_module_map;
+ }
+
+ for (i = 0; i < count; i++) {
+ err = __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i, 0);
+ if (err)
+ goto err_port_swid_set;
+ }
+
+ for (i = 0; i < count; i++) {
+ err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
+ module, width, i * width);
+ if (err)
+ goto err_port_create;
+ }
+
+ return 0;
+
+err_port_create:
+ for (i--; i >= 0; i--)
+ mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
+ i = count;
+err_port_swid_set:
+ for (i--; i >= 0; i--)
+ __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i,
+ MLXSW_PORT_SWID_DISABLED_PORT);
+ i = count;
+err_port_module_map:
+ for (i--; i >= 0; i--)
+ mlxsw_sp_port_module_unmap(mlxsw_sp, base_port + i);
+ return err;
+}
+
+static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
+ u8 base_port, unsigned int count)
+{
+ u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
+ int i;
+
+ /* Unsplitting a port that was split by four means re-creating two
+ * unsplit ports; after a split by two, only one.
+ */
+ count = count / 2;
+
+ for (i = 0; i < count; i++) {
+ local_port = base_port + i * 2;
+ module = mlxsw_sp->port_to_module[local_port];
+
+ mlxsw_sp_port_module_map(mlxsw_sp, local_port, module, width,
+ 0);
+ }
+
+ for (i = 0; i < count; i++)
+ __mlxsw_sp_port_swid_set(mlxsw_sp, base_port + i * 2, 0);
+
+ for (i = 0; i < count; i++) {
+ local_port = base_port + i * 2;
+ module = mlxsw_sp->port_to_module[local_port];
+
+ mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
+ width, 0);
+ }
+}
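
For clarity, the arithmetic above works out as follows (assuming MLXSW_PORT_MODULE_MAX_WIDTH is 4, the full module width used throughout this file):

/* count == 4: count / 2 == 2, so two full-width ports are re-created
 *             at base_port + 0 and base_port + 2.
 * count == 2: count / 2 == 1, so a single full-width port is
 *             re-created at base_port.
 */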
+
+static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
+ unsigned int count)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ struct mlxsw_sp_port *mlxsw_sp_port;
u8 module, cur_width, base_port;
int i;
int err;
@@ -1675,18 +2395,14 @@ static int mlxsw_sp_port_split(void *priv, u8 local_port, unsigned int count)
return -EINVAL;
}
+ module = mlxsw_sp_port->mapping.module;
+ cur_width = mlxsw_sp_port->mapping.width;
+
if (count != 2 && count != 4) {
netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
return -EINVAL;
}
- err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
- &cur_width);
- if (err) {
- netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
- return err;
- }
-
if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
return -EINVAL;
@@ -1711,36 +2427,26 @@ static int mlxsw_sp_port_split(void *priv, u8 local_port, unsigned int count)
for (i = 0; i < count; i++)
mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
- for (i = 0; i < count; i++) {
- err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
- module, width, i * width);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Failed to create split port\n");
- goto err_port_create;
- }
+ err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
+ goto err_port_split_create;
}
return 0;
-err_port_create:
- for (i--; i >= 0; i--)
- mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
- for (i = 0; i < count / 2; i++) {
- module = mlxsw_sp->port_to_module[base_port + i * 2];
- mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
- module, MLXSW_PORT_MODULE_MAX_WIDTH, 0);
- }
+err_port_split_create:
+ mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
return err;
}
-static int mlxsw_sp_port_unsplit(void *priv, u8 local_port)
+static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
{
- struct mlxsw_sp *mlxsw_sp = priv;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
struct mlxsw_sp_port *mlxsw_sp_port;
- u8 module, cur_width, base_port;
+ u8 cur_width, base_port;
unsigned int count;
int i;
- int err;
mlxsw_sp_port = mlxsw_sp->ports[local_port];
if (!mlxsw_sp_port) {
@@ -1754,12 +2460,7 @@ static int mlxsw_sp_port_unsplit(void *priv, u8 local_port)
return -EINVAL;
}
- err = mlxsw_sp_port_module_info_get(mlxsw_sp, local_port, &module,
- &cur_width);
- if (err) {
- netdev_err(mlxsw_sp_port->dev, "Failed to get port's width\n");
- return err;
- }
+ cur_width = mlxsw_sp_port->mapping.width;
count = cur_width == 1 ? 4 : 2;
base_port = mlxsw_sp_cluster_base_port_get(local_port);
@@ -1771,14 +2472,7 @@ static int mlxsw_sp_port_unsplit(void *priv, u8 local_port)
for (i = 0; i < count; i++)
mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
- for (i = 0; i < count / 2; i++) {
- module = mlxsw_sp->port_to_module[base_port + i * 2];
- err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * 2, false,
- module, MLXSW_PORT_MODULE_MAX_WIDTH,
- 0);
- if (err)
- dev_err(mlxsw_sp->bus_info->dev, "Failed to reinstantiate port\n");
- }
+ mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
return 0;
}
@@ -1793,11 +2487,8 @@ static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
local_port = mlxsw_reg_pude_local_port_get(pude_pl);
mlxsw_sp_port = mlxsw_sp->ports[local_port];
- if (!mlxsw_sp_port) {
- dev_warn(mlxsw_sp->bus_info->dev, "Port %d: Link event received for non-existent port\n",
- local_port);
+ if (!mlxsw_sp_port)
return;
- }
status = mlxsw_reg_pude_oper_status_get(pude_pl);
if (status == MLXSW_PORT_OPER_STATUS_UP) {
@@ -1952,6 +2643,51 @@ static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
.local_port = MLXSW_PORT_DONT_CARE,
.trap_id = MLXSW_TRAP_ID_IGMP_V3_REPORT,
},
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_ARPBC,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_ARPUC,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_MTUERROR,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_TTLERROR,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_LBERROR,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_OSPF,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_IP2ME,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_RTR_INGRESS0,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_HOST_MISS_IPV4,
+ },
};
static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
@@ -1992,7 +2728,7 @@ err_rx_trap_set:
mlxsw_sp);
err_rx_listener_register:
for (i--; i >= 0; i--) {
- mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
mlxsw_sp_rx_listener[i].trap_id);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
@@ -2009,7 +2745,7 @@ static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
int i;
for (i = 0; i < ARRAY_SIZE(mlxsw_sp_rx_listener); i++) {
- mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
+ mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
mlxsw_sp_rx_listener[i].trap_id);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(hpkt), hpkt_pl);
@@ -2080,16 +2816,16 @@ static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
}
-static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
+static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *mlxsw_bus_info)
{
- struct mlxsw_sp *mlxsw_sp = priv;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
int err;
mlxsw_sp->core = mlxsw_core;
mlxsw_sp->bus_info = mlxsw_bus_info;
- INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list);
- INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list);
+ INIT_LIST_HEAD(&mlxsw_sp->fids);
+ INIT_LIST_HEAD(&mlxsw_sp->vfids.list);
INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);
err = mlxsw_sp_base_mac_get(mlxsw_sp);
@@ -2098,16 +2834,10 @@ static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
return err;
}
- err = mlxsw_sp_ports_create(mlxsw_sp);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
- return err;
- }
-
err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to register for PUDE events\n");
- goto err_event_register;
+ return err;
}
err = mlxsw_sp_traps_init(mlxsw_sp);
@@ -2140,28 +2870,59 @@ static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
goto err_switchdev_init;
}
+ err = mlxsw_sp_router_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
+ goto err_router_init;
+ }
+
+ err = mlxsw_sp_span_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
+ goto err_span_init;
+ }
+
+ err = mlxsw_sp_ports_create(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
+ goto err_ports_create;
+ }
+
return 0;
+err_ports_create:
+ mlxsw_sp_span_fini(mlxsw_sp);
+err_span_init:
+ mlxsw_sp_router_fini(mlxsw_sp);
+err_router_init:
+ mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
err_lag_init:
+ mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
err_flood_init:
mlxsw_sp_traps_fini(mlxsw_sp);
err_rx_listener_register:
mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
-err_event_register:
- mlxsw_sp_ports_remove(mlxsw_sp);
return err;
}
-static void mlxsw_sp_fini(void *priv)
+static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
- struct mlxsw_sp *mlxsw_sp = priv;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ int i;
+ mlxsw_sp_ports_remove(mlxsw_sp);
+ mlxsw_sp_span_fini(mlxsw_sp);
+ mlxsw_sp_router_fini(mlxsw_sp);
mlxsw_sp_switchdev_fini(mlxsw_sp);
+ mlxsw_sp_buffers_fini(mlxsw_sp);
mlxsw_sp_traps_fini(mlxsw_sp);
mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
- mlxsw_sp_ports_remove(mlxsw_sp);
+ WARN_ON(!list_empty(&mlxsw_sp->vfids.list));
+ WARN_ON(!list_empty(&mlxsw_sp->fids));
+ for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
+ WARN_ON_ONCE(mlxsw_sp->rifs[i]);
}
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
@@ -2192,37 +2953,667 @@ static struct mlxsw_config_profile mlxsw_sp_config_profile = {
.max_ib_mc = 0,
.used_max_pkey = 1,
.max_pkey = 0,
+ .used_kvd_sizes = 1,
+ .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
+ .kvd_hash_single_size = MLXSW_SP_KVD_HASH_SINGLE_SIZE,
+ .kvd_hash_double_size = MLXSW_SP_KVD_HASH_DOUBLE_SIZE,
.swid_config = {
{
.used_type = 1,
.type = MLXSW_PORT_SWID_TYPE_ETH,
}
},
+ .resource_query_enable = 1,
};
static struct mlxsw_driver mlxsw_sp_driver = {
- .kind = MLXSW_DEVICE_KIND_SPECTRUM,
- .owner = THIS_MODULE,
- .priv_size = sizeof(struct mlxsw_sp),
- .init = mlxsw_sp_init,
- .fini = mlxsw_sp_fini,
- .port_split = mlxsw_sp_port_split,
- .port_unsplit = mlxsw_sp_port_unsplit,
- .txhdr_construct = mlxsw_sp_txhdr_construct,
- .txhdr_len = MLXSW_TXHDR_LEN,
- .profile = &mlxsw_sp_config_profile,
+ .kind = MLXSW_DEVICE_KIND_SPECTRUM,
+ .owner = THIS_MODULE,
+ .priv_size = sizeof(struct mlxsw_sp),
+ .init = mlxsw_sp_init,
+ .fini = mlxsw_sp_fini,
+ .port_split = mlxsw_sp_port_split,
+ .port_unsplit = mlxsw_sp_port_unsplit,
+ .sb_pool_get = mlxsw_sp_sb_pool_get,
+ .sb_pool_set = mlxsw_sp_sb_pool_set,
+ .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
+ .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
+ .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
+ .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
+ .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
+ .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
+ .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
+ .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
+ .txhdr_construct = mlxsw_sp_txhdr_construct,
+ .txhdr_len = MLXSW_TXHDR_LEN,
+ .profile = &mlxsw_sp_config_profile,
};
-static int
-mlxsw_sp_port_fdb_flush_by_port(const struct mlxsw_sp_port *mlxsw_sp_port)
+static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
+{
+ return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
+}
+
+static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
+{
+ struct net_device *lower_dev;
+ struct list_head *iter;
+
+ if (mlxsw_sp_port_dev_check(dev))
+ return netdev_priv(dev);
+
+ netdev_for_each_all_lower_dev(dev, lower_dev, iter) {
+ if (mlxsw_sp_port_dev_check(lower_dev))
+ return netdev_priv(lower_dev);
+ }
+ return NULL;
+}
+
+static struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port;
+
+ mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
+ return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
+}
+
+static struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
+{
+ struct net_device *lower_dev;
+ struct list_head *iter;
+
+ if (mlxsw_sp_port_dev_check(dev))
+ return netdev_priv(dev);
+
+ netdev_for_each_all_lower_dev_rcu(dev, lower_dev, iter) {
+ if (mlxsw_sp_port_dev_check(lower_dev))
+ return netdev_priv(lower_dev);
+ }
+ return NULL;
+}
+
+struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port;
+
+ rcu_read_lock();
+ mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
+ if (mlxsw_sp_port)
+ dev_hold(mlxsw_sp_port->dev);
+ rcu_read_unlock();
+ return mlxsw_sp_port;
+}
+
+void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ dev_put(mlxsw_sp_port->dev);
+}
+
+static bool mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *r,
+ unsigned long event)
+{
+ switch (event) {
+ case NETDEV_UP:
+ if (!r)
+ return true;
+ r->ref_count++;
+ return false;
+ case NETDEV_DOWN:
+ if (r && --r->ref_count == 0)
+ return true;
+ /* It is possible we already removed the RIF ourselves
+ * if it was assigned to a netdev that is now a bridge
+ * or LAG slave.
+ */
+ return false;
+ }
+
+ return false;
+}
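
One way to read the helper: it folds RIF reference counting into the decision of whether the notifier should touch hardware at all. The resulting decision table, under the semantics shown above:

/* event        r (existing RIF)          -> action
 * NETDEV_UP    NULL                      -> true:  create a new RIF
 * NETDEV_UP    non-NULL                  -> false: just bump r->ref_count
 * NETDEV_DOWN  NULL                      -> false: RIF already gone
 * NETDEV_DOWN  non-NULL, ref_count -> 0  -> true:  tear the RIF down
 * NETDEV_DOWN  non-NULL, ref_count > 0   -> false: other users remain
 */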
+
+static int mlxsw_sp_avail_rif_get(struct mlxsw_sp *mlxsw_sp)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
+ if (!mlxsw_sp->rifs[i])
+ return i;
+
+ return MLXSW_SP_RIF_MAX;
+}
+
+static void mlxsw_sp_vport_rif_sp_attr_get(struct mlxsw_sp_port *mlxsw_sp_vport,
+ bool *p_lagged, u16 *p_system_port)
+{
+ u8 local_port = mlxsw_sp_vport->local_port;
+
+ *p_lagged = mlxsw_sp_vport->lagged;
+ *p_system_port = *p_lagged ? mlxsw_sp_vport->lag_id : local_port;
+}
+
+static int mlxsw_sp_vport_rif_sp_op(struct mlxsw_sp_port *mlxsw_sp_vport,
+ struct net_device *l3_dev, u16 rif,
+ bool create)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+ bool lagged = mlxsw_sp_vport->lagged;
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+ u16 system_port;
+
+ mlxsw_reg_ritr_pack(ritr_pl, create, MLXSW_REG_RITR_SP_IF, rif,
+ l3_dev->mtu, l3_dev->dev_addr);
+
+ mlxsw_sp_vport_rif_sp_attr_get(mlxsw_sp_vport, &lagged, &system_port);
+ mlxsw_reg_ritr_sp_if_pack(ritr_pl, lagged, system_port,
+ mlxsw_sp_vport_vid_get(mlxsw_sp_vport));
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
+
+static struct mlxsw_sp_fid *
+mlxsw_sp_rfid_alloc(u16 fid, struct net_device *l3_dev)
+{
+ struct mlxsw_sp_fid *f;
+
+ f = kzalloc(sizeof(*f), GFP_KERNEL);
+ if (!f)
+ return NULL;
+
+ f->leave = mlxsw_sp_vport_rif_sp_leave;
+ f->ref_count = 0;
+ f->dev = l3_dev;
+ f->fid = fid;
+
+ return f;
+}
+
+static struct mlxsw_sp_rif *
+mlxsw_sp_rif_alloc(u16 rif, struct net_device *l3_dev, struct mlxsw_sp_fid *f)
+{
+ struct mlxsw_sp_rif *r;
+
+ r = kzalloc(sizeof(*r), GFP_KERNEL);
+ if (!r)
+ return NULL;
+
+ ether_addr_copy(r->addr, l3_dev->dev_addr);
+ r->mtu = l3_dev->mtu;
+ r->ref_count = 1;
+ r->dev = l3_dev;
+ r->rif = rif;
+ r->f = f;
+
+ return r;
+}
+
+static struct mlxsw_sp_rif *
+mlxsw_sp_vport_rif_sp_create(struct mlxsw_sp_port *mlxsw_sp_vport,
+ struct net_device *l3_dev)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+ struct mlxsw_sp_fid *f;
+ struct mlxsw_sp_rif *r;
+ u16 fid, rif;
+ int err;
+
+ rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
+ if (rif == MLXSW_SP_RIF_MAX)
+ return ERR_PTR(-ERANGE);
+
+ err = mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, true);
+ if (err)
+ return ERR_PTR(err);
+
+ fid = mlxsw_sp_rif_sp_to_fid(rif);
+ err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, true);
+ if (err)
+ goto err_rif_fdb_op;
+
+ f = mlxsw_sp_rfid_alloc(fid, l3_dev);
+ if (!f) {
+ err = -ENOMEM;
+ goto err_rfid_alloc;
+ }
+
+ r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
+ if (!r) {
+ err = -ENOMEM;
+ goto err_rif_alloc;
+ }
+
+ f->r = r;
+ mlxsw_sp->rifs[rif] = r;
+
+ return r;
+
+err_rif_alloc:
+ kfree(f);
+err_rfid_alloc:
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
+err_rif_fdb_op:
+ mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_vport_rif_sp_destroy(struct mlxsw_sp_port *mlxsw_sp_vport,
+ struct mlxsw_sp_rif *r)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+ struct net_device *l3_dev = r->dev;
+ struct mlxsw_sp_fid *f = r->f;
+ u16 fid = f->fid;
+ u16 rif = r->rif;
+
+ mlxsw_sp->rifs[rif] = NULL;
+ f->r = NULL;
+
+ kfree(r);
+
+ kfree(f);
+
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, fid, false);
+
+ mlxsw_sp_vport_rif_sp_op(mlxsw_sp_vport, l3_dev, rif, false);
+}
+
+static int mlxsw_sp_vport_rif_sp_join(struct mlxsw_sp_port *mlxsw_sp_vport,
+ struct net_device *l3_dev)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+ struct mlxsw_sp_rif *r;
+
+ r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
+ if (!r) {
+ r = mlxsw_sp_vport_rif_sp_create(mlxsw_sp_vport, l3_dev);
+ if (IS_ERR(r))
+ return PTR_ERR(r);
+ }
+
+ mlxsw_sp_vport_fid_set(mlxsw_sp_vport, r->f);
+ r->f->ref_count++;
+
+ netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", r->f->fid);
+
+ return 0;
+}
+
+static void mlxsw_sp_vport_rif_sp_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
+{
+ struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
+
+ netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
+
+ mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
+ if (--f->ref_count == 0)
+ mlxsw_sp_vport_rif_sp_destroy(mlxsw_sp_vport, f->r);
+}
+
+static int mlxsw_sp_inetaddr_vport_event(struct net_device *l3_dev,
+ struct net_device *port_dev,
+ unsigned long event, u16 vid)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(port_dev);
+ struct mlxsw_sp_port *mlxsw_sp_vport;
+
+ mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
+ if (WARN_ON(!mlxsw_sp_vport))
+ return -EINVAL;
+
+ switch (event) {
+ case NETDEV_UP:
+ return mlxsw_sp_vport_rif_sp_join(mlxsw_sp_vport, l3_dev);
+ case NETDEV_DOWN:
+ mlxsw_sp_vport_rif_sp_leave(mlxsw_sp_vport);
+ break;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev,
+ unsigned long event)
+{
+ if (netif_is_bridge_port(port_dev) || netif_is_lag_port(port_dev))
+ return 0;
+
+ return mlxsw_sp_inetaddr_vport_event(port_dev, port_dev, event, 1);
+}
+
+static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev,
+ struct net_device *lag_dev,
+ unsigned long event, u16 vid)
+{
+ struct net_device *port_dev;
+ struct list_head *iter;
+ int err;
+
+ netdev_for_each_lower_dev(lag_dev, port_dev, iter) {
+ if (mlxsw_sp_port_dev_check(port_dev)) {
+ err = mlxsw_sp_inetaddr_vport_event(l3_dev, port_dev,
+ event, vid);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev,
+ unsigned long event)
+{
+ if (netif_is_bridge_port(lag_dev))
+ return 0;
+
+ return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1);
+}
+
+static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *l3_dev)
+{
+ u16 fid;
+
+ if (is_vlan_dev(l3_dev))
+ fid = vlan_dev_vlan_id(l3_dev);
+ else if (mlxsw_sp->master_bridge.dev == l3_dev)
+ fid = 1;
+ else
+ return mlxsw_sp_vfid_find(mlxsw_sp, l3_dev);
+
+ return mlxsw_sp_fid_find(mlxsw_sp, fid);
+}
+
+static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid)
+{
+ return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID :
+ MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
+}
+
+static u16 mlxsw_sp_flood_table_index_get(u16 fid)
+{
+ return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid;
+}
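
These two helpers rely on the mlxsw FID layout: regular FIDs sit below MLXSW_SP_VFID_BASE and index the flood table directly, while vFIDs sit above the base and are indexed by their offset. A worked example, assuming mlxsw_sp_fid_to_vfid() subtracts MLXSW_SP_VFID_BASE (VLAN_N_VID, i.e. 4096):

/* fid = 100  -> regular FID: table type FID_OFFEST, index 100
 * fid = 4100 -> vFID:        table type FID,        index 4100 - 4096 = 4
 */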
+
+static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid,
+ bool set)
+{
+ enum mlxsw_flood_table_type table_type;
+ char *sftr_pl;
+ u16 index;
+ int err;
+
+ sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
+ if (!sftr_pl)
+ return -ENOMEM;
+
+ table_type = mlxsw_sp_flood_table_type_get(fid);
+ index = mlxsw_sp_flood_table_index_get(fid);
+ mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, index, table_type,
+ 1, MLXSW_PORT_ROUTER_PORT, set);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+
+ kfree(sftr_pl);
+ return err;
+}
+
+static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
+{
+ if (mlxsw_sp_fid_is_vfid(fid))
+ return MLXSW_REG_RITR_FID_IF;
+ else
+ return MLXSW_REG_RITR_VLAN_IF;
+}
+
+static int mlxsw_sp_rif_bridge_op(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *l3_dev,
+ u16 fid, u16 rif,
+ bool create)
+{
+ enum mlxsw_reg_ritr_if_type rif_type;
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+
+ rif_type = mlxsw_sp_rif_type_get(fid);
+ mlxsw_reg_ritr_pack(ritr_pl, create, rif_type, rif, l3_dev->mtu,
+ l3_dev->dev_addr);
+ mlxsw_reg_ritr_fid_set(ritr_pl, rif_type, fid);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *l3_dev,
+ struct mlxsw_sp_fid *f)
+{
+ struct mlxsw_sp_rif *r;
+ u16 rif;
+ int err;
+
+ rif = mlxsw_sp_avail_rif_get(mlxsw_sp);
+ if (rif == MLXSW_SP_RIF_MAX)
+ return -ERANGE;
+
+ err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true);
+ if (err)
+ goto err_rif_bridge_op;
+
+ err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
+ if (err)
+ goto err_rif_fdb_op;
+
+ r = mlxsw_sp_rif_alloc(rif, l3_dev, f);
+ if (!r) {
+ err = -ENOMEM;
+ goto err_rif_alloc;
+ }
+
+ f->r = r;
+ mlxsw_sp->rifs[rif] = r;
+
+ netdev_dbg(l3_dev, "RIF=%d created\n", rif);
+
+ return 0;
+
+err_rif_alloc:
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
+err_rif_fdb_op:
+ mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
+err_rif_bridge_op:
+ mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
+ return err;
+}
+
+void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *r)
+{
+ struct net_device *l3_dev = r->dev;
+ struct mlxsw_sp_fid *f = r->f;
+ u16 rif = r->rif;
+
+ mlxsw_sp->rifs[rif] = NULL;
+ f->r = NULL;
+
+ kfree(r);
+
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
+
+ mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
+
+ mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
+
+ netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif);
+}
+
+static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev,
+ struct net_device *br_dev,
+ unsigned long event)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev);
+ struct mlxsw_sp_fid *f;
+
+ /* The FID is an actual FID if the L3 device is the VLAN-aware
+ * bridge or a VLAN device on top of it. Otherwise, the L3 device
+ * is a VLAN-unaware bridge and we get a vFID.
+ */
+ f = mlxsw_sp_bridge_fid_get(mlxsw_sp, l3_dev);
+ if (WARN_ON(!f))
+ return -EINVAL;
+
+ switch (event) {
+ case NETDEV_UP:
+ return mlxsw_sp_rif_bridge_create(mlxsw_sp, l3_dev, f);
+ case NETDEV_DOWN:
+ mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
+ break;
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev,
+ unsigned long event)
+{
+ struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev);
+ u16 vid = vlan_dev_vlan_id(vlan_dev);
+
+ if (mlxsw_sp_port_dev_check(real_dev))
+ return mlxsw_sp_inetaddr_vport_event(vlan_dev, real_dev, event,
+ vid);
+ else if (netif_is_lag_master(real_dev))
+ return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event,
+ vid);
+ else if (netif_is_bridge_master(real_dev) &&
+ mlxsw_sp->master_bridge.dev == real_dev)
+ return mlxsw_sp_inetaddr_bridge_event(vlan_dev, real_dev,
+ event);
+
+ return 0;
+}
+
+static int mlxsw_sp_inetaddr_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
+ struct net_device *dev = ifa->ifa_dev->dev;
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_rif *r;
+ int err = 0;
+
+ mlxsw_sp = mlxsw_sp_lower_get(dev);
+ if (!mlxsw_sp)
+ goto out;
+
+ r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ if (!mlxsw_sp_rif_should_config(r, event))
+ goto out;
+
+ if (mlxsw_sp_port_dev_check(dev))
+ err = mlxsw_sp_inetaddr_port_event(dev, event);
+ else if (netif_is_lag_master(dev))
+ err = mlxsw_sp_inetaddr_lag_event(dev, event);
+ else if (netif_is_bridge_master(dev))
+ err = mlxsw_sp_inetaddr_bridge_event(dev, dev, event);
+ else if (is_vlan_dev(dev))
+ err = mlxsw_sp_inetaddr_vlan_event(dev, event);
+
+out:
+ return notifier_from_errno(err);
+}
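
Returning notifier_from_errno(err) rather than a bare NOTIFY_BAD lets the notifier core carry the errno back to the caller. This is the standard kernel helper, which encodes the error as follows:

/* notifier_from_errno(0)    == NOTIFY_OK
 * notifier_from_errno(-err) == NOTIFY_STOP_MASK | (NOTIFY_OK + err),
 * so the chain stops and notifier_to_errno() recovers -err unchanged.
 */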
+
+static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif,
+ const char *mac, int mtu)
+{
+ char ritr_pl[MLXSW_REG_RITR_LEN];
+ int err;
+
+ mlxsw_reg_ritr_rif_pack(ritr_pl, rif);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+ if (err)
+ return err;
+
+ mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
+ mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
+ mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
+}
+
+static int mlxsw_sp_netdevice_router_port_event(struct net_device *dev)
+{
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_rif *r;
+ int err;
+
+ mlxsw_sp = mlxsw_sp_lower_get(dev);
+ if (!mlxsw_sp)
+ return 0;
+
+ r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ if (!r)
+ return 0;
+
+ err = mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, false);
+ if (err)
+ return err;
+
+ err = mlxsw_sp_rif_edit(mlxsw_sp, r->rif, dev->dev_addr, dev->mtu);
+ if (err)
+ goto err_rif_edit;
+
+ err = mlxsw_sp_rif_fdb_op(mlxsw_sp, dev->dev_addr, r->f->fid, true);
+ if (err)
+ goto err_rif_fdb_op;
+
+ ether_addr_copy(r->addr, dev->dev_addr);
+ r->mtu = dev->mtu;
+
+ netdev_dbg(dev, "Updated RIF=%d\n", r->rif);
+
+ return 0;
+
+err_rif_fdb_op:
+ mlxsw_sp_rif_edit(mlxsw_sp, r->rif, r->addr, r->mtu);
+err_rif_edit:
+ mlxsw_sp_rif_fdb_op(mlxsw_sp, r->addr, r->f->fid, true);
+ return err;
+}
+
+static bool mlxsw_sp_lag_port_fid_member(struct mlxsw_sp_port *lag_port,
+ u16 fid)
+{
+ if (mlxsw_sp_fid_is_vfid(fid))
+ return mlxsw_sp_port_vport_find_by_fid(lag_port, fid);
+ else
+ return test_bit(fid, lag_port->active_vlans);
+}
+
+static bool mlxsw_sp_port_fdb_should_flush(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 fid)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char sfdf_pl[MLXSW_REG_SFDF_LEN];
+ u8 local_port = mlxsw_sp_port->local_port;
+ u16 lag_id = mlxsw_sp_port->lag_id;
+ int i, count = 0;
- mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_PORT);
- mlxsw_reg_sfdf_system_port_set(sfdf_pl, mlxsw_sp_port->local_port);
+ if (!mlxsw_sp_port->lagged)
+ return true;
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
+ for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
+ struct mlxsw_sp_port *lag_port;
+
+ lag_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
+ if (!lag_port || lag_port->local_port == local_port)
+ continue;
+ if (mlxsw_sp_lag_port_fid_member(lag_port, fid))
+ count++;
+ }
+
+ return !count;
}
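
In other words, for a LAG member the flush is deferred to {LAG, FID} granularity: FDB entries are shared by all ports in the LAG, so they may only be flushed once no other member port is still mapped to the FID. Schematically:

/* not lagged          -> flush (entries are private to the port)
 * lagged, count == 0  -> flush (last member of the FID in this LAG)
 * lagged, count  > 0  -> skip  (other members still use the entries)
 */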
static int
@@ -2237,17 +3628,8 @@ mlxsw_sp_port_fdb_flush_by_port_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl,
mlxsw_sp_port->local_port);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
-}
-
-static int
-mlxsw_sp_port_fdb_flush_by_lag_id(const struct mlxsw_sp_port *mlxsw_sp_port)
-{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- char sfdf_pl[MLXSW_REG_SFDF_LEN];
-
- mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_LAG);
- mlxsw_reg_sfdf_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
+ netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using Port=%d, FID=%d\n",
+ mlxsw_sp_port->local_port, fid);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
@@ -2263,71 +3645,64 @@ mlxsw_sp_port_fdb_flush_by_lag_id_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_reg_sfdf_fid_set(sfdf_pl, fid);
mlxsw_reg_sfdf_lag_fid_lag_id_set(sfdf_pl, mlxsw_sp_port->lag_id);
+ netdev_dbg(mlxsw_sp_port->dev, "FDB flushed using LAG ID=%d, FID=%d\n",
+ mlxsw_sp_port->lag_id, fid);
+
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
-static int
-__mlxsw_sp_port_fdb_flush(const struct mlxsw_sp_port *mlxsw_sp_port)
+int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
{
- int err, last_err = 0;
- u16 vid;
-
- for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
- err = mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, vid);
- if (err)
- last_err = err;
- }
+ if (!mlxsw_sp_port_fdb_should_flush(mlxsw_sp_port, fid))
+ return 0;
- return last_err;
+ if (mlxsw_sp_port->lagged)
+ return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port,
+ fid);
+ else
+ return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_port, fid);
}
-static int
-__mlxsw_sp_port_fdb_flush_lagged(const struct mlxsw_sp_port *mlxsw_sp_port)
+static void mlxsw_sp_master_bridge_gone_sync(struct mlxsw_sp *mlxsw_sp)
{
- int err, last_err = 0;
- u16 vid;
-
- for (vid = 1; vid < VLAN_N_VID - 1; vid++) {
- err = mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_port, vid);
- if (err)
- last_err = err;
- }
+ struct mlxsw_sp_fid *f, *tmp;
- return last_err;
+ list_for_each_entry_safe(f, tmp, &mlxsw_sp->fids, list)
+ if (--f->ref_count == 0)
+ mlxsw_sp_fid_destroy(mlxsw_sp, f);
+ else
+ WARN_ON_ONCE(1);
}
-static int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port)
+static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *br_dev)
{
- if (!list_empty(&mlxsw_sp_port->vports_list))
- if (mlxsw_sp_port->lagged)
- return __mlxsw_sp_port_fdb_flush_lagged(mlxsw_sp_port);
- else
- return __mlxsw_sp_port_fdb_flush(mlxsw_sp_port);
- else
- if (mlxsw_sp_port->lagged)
- return mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port);
- else
- return mlxsw_sp_port_fdb_flush_by_port(mlxsw_sp_port);
+ return !mlxsw_sp->master_bridge.dev ||
+ mlxsw_sp->master_bridge.dev == br_dev;
}
-static int mlxsw_sp_vport_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_vport)
+static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *br_dev)
{
- u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_vport);
- u16 fid = mlxsw_sp_vfid_to_fid(vfid);
-
- if (mlxsw_sp_vport->lagged)
- return mlxsw_sp_port_fdb_flush_by_lag_id_fid(mlxsw_sp_vport,
- fid);
- else
- return mlxsw_sp_port_fdb_flush_by_port_fid(mlxsw_sp_vport, fid);
+ mlxsw_sp->master_bridge.dev = br_dev;
+ mlxsw_sp->master_bridge.ref_count++;
}
-static bool mlxsw_sp_port_dev_check(const struct net_device *dev)
+static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp)
{
- return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
+ if (--mlxsw_sp->master_bridge.ref_count == 0) {
+ mlxsw_sp->master_bridge.dev = NULL;
+ /* It's possible upper VLAN devices are still holding
+ * references to underlying FIDs. Drop each such reference
+ * and release the resources once the last one is gone;
+ * anything left over indicates a bug.
+ */
+ mlxsw_sp_master_bridge_gone_sync(mlxsw_sp);
+ }
}
-static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
+static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *br_dev)
{
struct net_device *dev = mlxsw_sp_port->dev;
int err;
@@ -2341,6 +3716,8 @@ static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
if (err)
return err;
+ mlxsw_sp_master_bridge_inc(mlxsw_sp_port->mlxsw_sp, br_dev);
+
mlxsw_sp_port->learning = 1;
mlxsw_sp_port->learning_sync = 1;
mlxsw_sp_port->uc_flood = 1;
@@ -2349,16 +3726,14 @@ static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
return 0;
}
-static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
- bool flush_fdb)
+static void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
struct net_device *dev = mlxsw_sp_port->dev;
- if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port))
- netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
-
mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
+ mlxsw_sp_master_bridge_dec(mlxsw_sp_port->mlxsw_sp);
+
mlxsw_sp_port->learning = 0;
mlxsw_sp_port->learning_sync = 0;
mlxsw_sp_port->uc_flood = 0;
@@ -2367,28 +3742,7 @@ static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
/* Add an implicit VLAN interface to the device, so that untagged
* packets are classified to the default vFID.
*/
- return mlxsw_sp_port_add_vid(dev, 0, 1);
-}
-
-static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
- struct net_device *br_dev)
-{
- return !mlxsw_sp->master_bridge.dev ||
- mlxsw_sp->master_bridge.dev == br_dev;
-}
-
-static void mlxsw_sp_master_bridge_inc(struct mlxsw_sp *mlxsw_sp,
- struct net_device *br_dev)
-{
- mlxsw_sp->master_bridge.dev = br_dev;
- mlxsw_sp->master_bridge.ref_count++;
-}
-
-static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp,
- struct net_device *br_dev)
-{
- if (--mlxsw_sp->master_bridge.ref_count == 0)
- mlxsw_sp->master_bridge.dev = NULL;
+ mlxsw_sp_port_add_vid(dev, 0, 1);
}
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
@@ -2504,6 +3858,45 @@ static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
return -EBUSY;
}
+static void
+mlxsw_sp_port_pvid_vport_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 lag_id)
+{
+ struct mlxsw_sp_port *mlxsw_sp_vport;
+ struct mlxsw_sp_fid *f;
+
+ mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
+ if (WARN_ON(!mlxsw_sp_vport))
+ return;
+
+ /* If vPort is assigned a RIF, then leave it since it's no
+ * longer valid.
+ */
+ f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
+ if (f)
+ f->leave(mlxsw_sp_vport);
+
+ mlxsw_sp_vport->lag_id = lag_id;
+ mlxsw_sp_vport->lagged = 1;
+}
+
+static void
+mlxsw_sp_port_pvid_vport_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_port *mlxsw_sp_vport;
+ struct mlxsw_sp_fid *f;
+
+ mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, 1);
+ if (WARN_ON(!mlxsw_sp_vport))
+ return;
+
+ f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
+ if (f)
+ f->leave(mlxsw_sp_vport);
+
+ mlxsw_sp_vport->lagged = 0;
+}
+
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
struct net_device *lag_dev)
{
@@ -2539,6 +3932,9 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port->lag_id = lag_id;
mlxsw_sp_port->lagged = 1;
lag->ref_count++;
+
+ mlxsw_sp_port_pvid_vport_lag_join(mlxsw_sp_port, lag_id);
+
return 0;
err_col_port_enable:
@@ -2549,65 +3945,35 @@ err_col_port_add:
return err;
}
-static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
- struct net_device *br_dev,
- bool flush_fdb);
-
-static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
- struct net_device *lag_dev)
+static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *lag_dev)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct mlxsw_sp_port *mlxsw_sp_vport;
- struct mlxsw_sp_upper *lag;
u16 lag_id = mlxsw_sp_port->lag_id;
- int err;
+ struct mlxsw_sp_upper *lag;
if (!mlxsw_sp_port->lagged)
- return 0;
+ return;
lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
WARN_ON(lag->ref_count == 0);
- err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
- if (err)
- return err;
- err = mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
- if (err)
- return err;
-
- /* In case we leave a LAG device that has bridges built on top,
- * then their teardown sequence is never issued and we need to
- * invoke the necessary cleanup routines ourselves.
- */
- list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
- vport.list) {
- struct net_device *br_dev;
-
- if (!mlxsw_sp_vport->bridged)
- continue;
-
- br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
- mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, false);
- }
+ mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
+ mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
if (mlxsw_sp_port->bridged) {
mlxsw_sp_port_active_vlans_del(mlxsw_sp_port);
- mlxsw_sp_port_bridge_leave(mlxsw_sp_port, false);
- mlxsw_sp_master_bridge_dec(mlxsw_sp, NULL);
+ mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
}
- if (lag->ref_count == 1) {
- if (mlxsw_sp_port_fdb_flush_by_lag_id(mlxsw_sp_port))
- netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
- err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
- if (err)
- return err;
- }
+ if (lag->ref_count == 1)
+ mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
mlxsw_sp_port->local_port);
mlxsw_sp_port->lagged = 0;
lag->ref_count--;
- return 0;
+
+ mlxsw_sp_port_pvid_vport_lag_leave(mlxsw_sp_port);
}
static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -2656,42 +4022,25 @@ static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid = vlan_dev_vlan_id(vlan_dev);
mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
- if (!mlxsw_sp_vport) {
- WARN_ON(!mlxsw_sp_vport);
+ if (WARN_ON(!mlxsw_sp_vport))
return -EINVAL;
- }
mlxsw_sp_vport->dev = vlan_dev;
return 0;
}
-static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
- struct net_device *vlan_dev)
+static void mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *vlan_dev)
{
struct mlxsw_sp_port *mlxsw_sp_vport;
u16 vid = vlan_dev_vlan_id(vlan_dev);
mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
- if (!mlxsw_sp_vport) {
- WARN_ON(!mlxsw_sp_vport);
- return -EINVAL;
- }
-
- /* When removing a VLAN device while still bridged we should first
- * remove it from the bridge, as we receive the bridge's notification
- * when the vPort is already gone.
- */
- if (mlxsw_sp_vport->bridged) {
- struct net_device *br_dev;
-
- br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
- mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev, true);
- }
+ if (WARN_ON(!mlxsw_sp_vport))
+ return;
mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
-
- return 0;
}
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
@@ -2701,7 +4050,7 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
struct mlxsw_sp_port *mlxsw_sp_port;
struct net_device *upper_dev;
struct mlxsw_sp *mlxsw_sp;
- int err;
+ int err = 0;
mlxsw_sp_port = netdev_priv(dev);
mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
@@ -2710,73 +4059,56 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
switch (event) {
case NETDEV_PRECHANGEUPPER:
upper_dev = info->upper_dev;
- if (!info->master || !info->linking)
+ if (!is_vlan_dev(upper_dev) &&
+ !netif_is_lag_master(upper_dev) &&
+ !netif_is_bridge_master(upper_dev))
+ return -EINVAL;
+ if (!info->linking)
break;
/* A HW limitation forbids putting a port into multiple bridges. */
if (netif_is_bridge_master(upper_dev) &&
!mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
- return NOTIFY_BAD;
+ return -EINVAL;
if (netif_is_lag_master(upper_dev) &&
!mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
info->upper_info))
- return NOTIFY_BAD;
+ return -EINVAL;
+ if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
+ return -EINVAL;
+ if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
+ !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
+ return -EINVAL;
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
if (is_vlan_dev(upper_dev)) {
- if (info->linking) {
+ if (info->linking)
err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
upper_dev);
- if (err) {
- netdev_err(dev, "Failed to link VLAN device\n");
- return NOTIFY_BAD;
- }
- } else {
- err = mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
- upper_dev);
- if (err) {
- netdev_err(dev, "Failed to unlink VLAN device\n");
- return NOTIFY_BAD;
- }
- }
+ else
+ mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
+ upper_dev);
} else if (netif_is_bridge_master(upper_dev)) {
- if (info->linking) {
- err = mlxsw_sp_port_bridge_join(mlxsw_sp_port);
- if (err) {
- netdev_err(dev, "Failed to join bridge\n");
- return NOTIFY_BAD;
- }
- mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
- } else {
- err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
- true);
- mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
- if (err) {
- netdev_err(dev, "Failed to leave bridge\n");
- return NOTIFY_BAD;
- }
- }
+ if (info->linking)
+ err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
+ upper_dev);
+ else
+ mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
} else if (netif_is_lag_master(upper_dev)) {
- if (info->linking) {
+ if (info->linking)
err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
upper_dev);
- if (err) {
- netdev_err(dev, "Failed to join link aggregation\n");
- return NOTIFY_BAD;
- }
- } else {
- err = mlxsw_sp_port_lag_leave(mlxsw_sp_port,
- upper_dev);
- if (err) {
- netdev_err(dev, "Failed to leave link aggregation\n");
- return NOTIFY_BAD;
- }
- }
+ else
+ mlxsw_sp_port_lag_leave(mlxsw_sp_port,
+ upper_dev);
+ } else {
+ err = -EINVAL;
+ WARN_ON(1);
}
break;
}
- return NOTIFY_DONE;
+ return err;
}
static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
@@ -2800,7 +4132,7 @@ static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
break;
}
- return NOTIFY_DONE;
+ return 0;
}
static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
@@ -2814,7 +4146,7 @@ static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
}
- return NOTIFY_DONE;
+ return 0;
}
static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
@@ -2827,218 +4159,230 @@ static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
netdev_for_each_lower_dev(lag_dev, dev, iter) {
if (mlxsw_sp_port_dev_check(dev)) {
ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
- if (ret == NOTIFY_BAD)
+ if (ret)
return ret;
}
}
- return NOTIFY_DONE;
+ return 0;
}
-static struct mlxsw_sp_vfid *
-mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp,
- const struct net_device *br_dev)
+static int mlxsw_sp_master_bridge_vlan_link(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *vlan_dev)
{
- struct mlxsw_sp_vfid *vfid;
-
- list_for_each_entry(vfid, &mlxsw_sp->br_vfids.list, list) {
- if (vfid->br_dev == br_dev)
- return vfid;
+ u16 fid = vlan_dev_vlan_id(vlan_dev);
+ struct mlxsw_sp_fid *f;
+
+ f = mlxsw_sp_fid_find(mlxsw_sp, fid);
+ if (!f) {
+ f = mlxsw_sp_fid_create(mlxsw_sp, fid);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
}
- return NULL;
+ f->ref_count++;
+
+ return 0;
+}
+
+static void mlxsw_sp_master_bridge_vlan_unlink(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *vlan_dev)
+{
+ u16 fid = vlan_dev_vlan_id(vlan_dev);
+ struct mlxsw_sp_fid *f;
+
+ f = mlxsw_sp_fid_find(mlxsw_sp, fid);
+ if (f && f->r)
+ mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
+ if (f && --f->ref_count == 0)
+ mlxsw_sp_fid_destroy(mlxsw_sp, f);
}
-static u16 mlxsw_sp_vfid_to_br_vfid(u16 vfid)
+static int mlxsw_sp_netdevice_bridge_event(struct net_device *br_dev,
+ unsigned long event, void *ptr)
{
- return vfid - MLXSW_SP_VFID_PORT_MAX;
+ struct netdev_notifier_changeupper_info *info;
+ struct net_device *upper_dev;
+ struct mlxsw_sp *mlxsw_sp;
+ int err;
+
+ mlxsw_sp = mlxsw_sp_lower_get(br_dev);
+ if (!mlxsw_sp)
+ return 0;
+ if (br_dev != mlxsw_sp->master_bridge.dev)
+ return 0;
+
+ info = ptr;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ upper_dev = info->upper_dev;
+ if (!is_vlan_dev(upper_dev))
+ break;
+ if (info->linking) {
+ err = mlxsw_sp_master_bridge_vlan_link(mlxsw_sp,
+ upper_dev);
+ if (err)
+ return err;
+ } else {
+ mlxsw_sp_master_bridge_vlan_unlink(mlxsw_sp, upper_dev);
+ }
+ break;
+ }
+
+ return 0;
}
-static u16 mlxsw_sp_br_vfid_to_vfid(u16 br_vfid)
+static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
{
- return MLXSW_SP_VFID_PORT_MAX + br_vfid;
+ return find_first_zero_bit(mlxsw_sp->vfids.mapped,
+ MLXSW_SP_VFID_MAX);
}
-static u16 mlxsw_sp_avail_br_vfid_get(const struct mlxsw_sp *mlxsw_sp)
+static int mlxsw_sp_vfid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
{
- return find_first_zero_bit(mlxsw_sp->br_vfids.mapped,
- MLXSW_SP_VFID_BR_MAX);
+ char sfmr_pl[MLXSW_REG_SFMR_LEN];
+
+ mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
}
-static struct mlxsw_sp_vfid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp,
- struct net_device *br_dev)
+static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport);
+
+static struct mlxsw_sp_fid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *br_dev)
{
struct device *dev = mlxsw_sp->bus_info->dev;
- struct mlxsw_sp_vfid *vfid;
- u16 n_vfid;
+ struct mlxsw_sp_fid *f;
+ u16 vfid, fid;
int err;
- n_vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp));
- if (n_vfid == MLXSW_SP_VFID_MAX) {
+ vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
+ if (vfid == MLXSW_SP_VFID_MAX) {
dev_err(dev, "No available vFIDs\n");
return ERR_PTR(-ERANGE);
}
- err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
+ fid = mlxsw_sp_vfid_to_fid(vfid);
+ err = mlxsw_sp_vfid_op(mlxsw_sp, fid, true);
if (err) {
- dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
+ dev_err(dev, "Failed to create FID=%d\n", fid);
return ERR_PTR(err);
}
- vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
- if (!vfid)
+ f = kzalloc(sizeof(*f), GFP_KERNEL);
+ if (!f)
goto err_allocate_vfid;
- vfid->vfid = n_vfid;
- vfid->br_dev = br_dev;
+ f->leave = mlxsw_sp_vport_vfid_leave;
+ f->fid = fid;
+ f->dev = br_dev;
- list_add(&vfid->list, &mlxsw_sp->br_vfids.list);
- set_bit(mlxsw_sp_vfid_to_br_vfid(n_vfid), mlxsw_sp->br_vfids.mapped);
+ list_add(&f->list, &mlxsw_sp->vfids.list);
+ set_bit(vfid, mlxsw_sp->vfids.mapped);
- return vfid;
+ return f;
err_allocate_vfid:
- __mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
+ mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
return ERR_PTR(-ENOMEM);
}
-static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_vfid *vfid)
+static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fid *f)
{
- u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid->vfid);
+ u16 vfid = mlxsw_sp_fid_to_vfid(f->fid);
+ u16 fid = f->fid;
+
+ clear_bit(vfid, mlxsw_sp->vfids.mapped);
+ list_del(&f->list);
- clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped);
- list_del(&vfid->list);
+ if (f->r)
+ mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
- __mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);
+ kfree(f);
- kfree(vfid);
+ mlxsw_sp_vfid_op(mlxsw_sp, fid, false);
}
-static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
- struct net_device *br_dev,
- bool flush_fdb)
+static int mlxsw_sp_vport_fid_map(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
+ bool valid)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+ enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
- struct net_device *dev = mlxsw_sp_vport->dev;
- struct mlxsw_sp_vfid *vfid, *new_vfid;
- int err;
-
- vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
- if (!vfid) {
- WARN_ON(!vfid);
- return -EINVAL;
- }
-
- /* We need a vFID to go back to after leaving the bridge's vFID. */
- new_vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
- if (!new_vfid) {
- new_vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
- if (IS_ERR(new_vfid)) {
- netdev_err(dev, "Failed to create vFID for VID=%d\n",
- vid);
- return PTR_ERR(new_vfid);
- }
- }
- /* Invalidate existing {Port, VID} to vFID mapping and create a new
- * one for the new vFID.
- */
- err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
- MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
- false,
- mlxsw_sp_vfid_to_fid(vfid->vfid),
- vid);
- if (err) {
- netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
- vfid->vfid);
- goto err_port_vid_to_fid_invalidate;
- }
+ return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport, mt, valid, fid,
+ vid);
+}
- err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
- MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
- true,
- mlxsw_sp_vfid_to_fid(new_vfid->vfid),
- vid);
- if (err) {
- netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
- new_vfid->vfid);
- goto err_port_vid_to_fid_validate;
- }
+static int mlxsw_sp_vport_vfid_join(struct mlxsw_sp_port *mlxsw_sp_vport,
+ struct net_device *br_dev)
+{
+ struct mlxsw_sp_fid *f;
+ int err;
- err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
- if (err) {
- netdev_err(dev, "Failed to disable learning\n");
- goto err_port_vid_learning_set;
+ f = mlxsw_sp_vfid_find(mlxsw_sp_vport->mlxsw_sp, br_dev);
+ if (!f) {
+ f = mlxsw_sp_vfid_create(mlxsw_sp_vport->mlxsw_sp, br_dev);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
}
- err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
- false);
- if (err) {
- netdev_err(dev, "Failed clear to clear flooding\n");
+ err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, true);
+ if (err)
goto err_vport_flood_set;
- }
-
- err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
- MLXSW_REG_SPMS_STATE_FORWARDING);
- if (err) {
- netdev_err(dev, "Failed to set STP state\n");
- goto err_port_stp_state_set;
- }
- if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport))
- netdev_err(dev, "Failed to flush FDB\n");
+ err = mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, true);
+ if (err)
+ goto err_vport_fid_map;
- /* Switch between the vFIDs and destroy the old one if needed. */
- new_vfid->nr_vports++;
- mlxsw_sp_vport->vport.vfid = new_vfid;
- vfid->nr_vports--;
- if (!vfid->nr_vports)
- mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
+ mlxsw_sp_vport_fid_set(mlxsw_sp_vport, f);
+ f->ref_count++;
- mlxsw_sp_vport->learning = 0;
- mlxsw_sp_vport->learning_sync = 0;
- mlxsw_sp_vport->uc_flood = 0;
- mlxsw_sp_vport->bridged = 0;
+ netdev_dbg(mlxsw_sp_vport->dev, "Joined FID=%d\n", f->fid);
return 0;
-err_port_stp_state_set:
+err_vport_fid_map:
+ mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
err_vport_flood_set:
-err_port_vid_learning_set:
-err_port_vid_to_fid_validate:
-err_port_vid_to_fid_invalidate:
- /* Rollback vFID only if new. */
- if (!new_vfid->nr_vports)
- mlxsw_sp_vfid_destroy(mlxsw_sp, new_vfid);
+ if (!f->ref_count)
+ mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
return err;
}
+static void mlxsw_sp_vport_vfid_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
+{
+ struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
+
+ netdev_dbg(mlxsw_sp_vport->dev, "Left FID=%d\n", f->fid);
+
+ mlxsw_sp_vport_fid_map(mlxsw_sp_vport, f->fid, false);
+
+ mlxsw_sp_vport_flood_set(mlxsw_sp_vport, f->fid, false);
+
+ mlxsw_sp_port_fdb_flush(mlxsw_sp_vport, f->fid);
+
+ mlxsw_sp_vport_fid_set(mlxsw_sp_vport, NULL);
+ if (--f->ref_count == 0)
+ mlxsw_sp_vfid_destroy(mlxsw_sp_vport->mlxsw_sp, f);
+}
+
static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
struct net_device *br_dev)
{
- struct mlxsw_sp_vfid *old_vfid = mlxsw_sp_vport->vport.vfid;
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+ struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
struct net_device *dev = mlxsw_sp_vport->dev;
- struct mlxsw_sp_vfid *vfid;
int err;
- vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
- if (!vfid) {
- vfid = mlxsw_sp_br_vfid_create(mlxsw_sp, br_dev);
- if (IS_ERR(vfid)) {
- netdev_err(dev, "Failed to create bridge vFID\n");
- return PTR_ERR(vfid);
- }
- }
+ if (f && !WARN_ON(!f->leave))
+ f->leave(mlxsw_sp_vport);
- err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, true, false);
+ err = mlxsw_sp_vport_vfid_join(mlxsw_sp_vport, br_dev);
if (err) {
- netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
- vfid->vfid);
- goto err_port_flood_set;
+ netdev_err(dev, "Failed to join vFID\n");
+ return err;
}
err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
@@ -3047,38 +4391,6 @@ static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
goto err_port_vid_learning_set;
}
- /* We need to invalidate existing {Port, VID} to vFID mapping and
- * create a new one for the bridge's vFID.
- */
- err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
- MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
- false,
- mlxsw_sp_vfid_to_fid(old_vfid->vfid),
- vid);
- if (err) {
- netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
- old_vfid->vfid);
- goto err_port_vid_to_fid_invalidate;
- }
-
- err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
- MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
- true,
- mlxsw_sp_vfid_to_fid(vfid->vfid),
- vid);
- if (err) {
- netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
- vfid->vfid);
- goto err_port_vid_to_fid_validate;
- }
-
- /* Switch between the vFIDs and destroy the old one if needed. */
- vfid->nr_vports++;
- mlxsw_sp_vport->vport.vfid = vfid;
- old_vfid->nr_vports--;
- if (!old_vfid->nr_vports)
- mlxsw_sp_vfid_destroy(mlxsw_sp, old_vfid);
-
mlxsw_sp_vport->learning = 1;
mlxsw_sp_vport->learning_sync = 1;
mlxsw_sp_vport->uc_flood = 1;
@@ -3086,20 +4398,25 @@ static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
return 0;
-err_port_vid_to_fid_validate:
- mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
- MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
- mlxsw_sp_vfid_to_fid(old_vfid->vfid), vid);
-err_port_vid_to_fid_invalidate:
- mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
err_port_vid_learning_set:
- mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, false);
-err_port_flood_set:
- if (!vfid->nr_vports)
- mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
+ mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
return err;
}
+static void mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport)
+{
+ u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
+
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
+
+ mlxsw_sp_vport_vfid_leave(mlxsw_sp_vport);
+
+ mlxsw_sp_vport->learning = 0;
+ mlxsw_sp_vport->learning_sync = 0;
+ mlxsw_sp_vport->uc_flood = 0;
+ mlxsw_sp_vport->bridged = 0;
+}
+
static bool
mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
const struct net_device *br_dev)
@@ -3108,7 +4425,9 @@ mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
vport.list) {
- if (mlxsw_sp_vport_br_get(mlxsw_sp_vport) == br_dev)
+ struct net_device *dev = mlxsw_sp_vport_dev_get(mlxsw_sp_vport);
+
+ if (dev && dev == br_dev)
return false;
}
@@ -3123,56 +4442,39 @@ static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
struct netdev_notifier_changeupper_info *info = ptr;
struct mlxsw_sp_port *mlxsw_sp_vport;
struct net_device *upper_dev;
- int err;
+ int err = 0;
mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
switch (event) {
case NETDEV_PRECHANGEUPPER:
upper_dev = info->upper_dev;
- if (!info->master || !info->linking)
- break;
if (!netif_is_bridge_master(upper_dev))
- return NOTIFY_BAD;
+ return -EINVAL;
+ if (!info->linking)
+ break;
/* We can't have multiple VLAN interfaces configured on
* the same port and being members in the same bridge.
*/
if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
upper_dev))
- return NOTIFY_BAD;
+ return -EINVAL;
break;
case NETDEV_CHANGEUPPER:
upper_dev = info->upper_dev;
- if (!info->master)
- break;
if (info->linking) {
- if (!mlxsw_sp_vport) {
- WARN_ON(!mlxsw_sp_vport);
- return NOTIFY_BAD;
- }
+ if (WARN_ON(!mlxsw_sp_vport))
+ return -EINVAL;
err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
upper_dev);
- if (err) {
- netdev_err(dev, "Failed to join bridge\n");
- return NOTIFY_BAD;
- }
} else {
- /* We ignore bridge's unlinking notifications if vPort
- * is gone, since we already left the bridge when the
- * VLAN device was unlinked from the real device.
- */
if (!mlxsw_sp_vport)
- return NOTIFY_DONE;
- err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport,
- upper_dev, true);
- if (err) {
- netdev_err(dev, "Failed to leave bridge\n");
- return NOTIFY_BAD;
- }
+ return 0;
+ mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport);
}
}
- return NOTIFY_DONE;
+ return err;
}
static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
@@ -3187,12 +4489,12 @@ static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
if (mlxsw_sp_port_dev_check(dev)) {
ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
vid);
- if (ret == NOTIFY_BAD)
+ if (ret)
return ret;
}
}
- return NOTIFY_DONE;
+ return 0;
}
static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
@@ -3208,41 +4510,58 @@ static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
vid);
- return NOTIFY_DONE;
+ return 0;
}
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
-
- if (mlxsw_sp_port_dev_check(dev))
- return mlxsw_sp_netdevice_port_event(dev, event, ptr);
-
- if (netif_is_lag_master(dev))
- return mlxsw_sp_netdevice_lag_event(dev, event, ptr);
-
- if (is_vlan_dev(dev))
- return mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
-
- return NOTIFY_DONE;
+ int err = 0;
+
+ if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
+ err = mlxsw_sp_netdevice_router_port_event(dev);
+ else if (mlxsw_sp_port_dev_check(dev))
+ err = mlxsw_sp_netdevice_port_event(dev, event, ptr);
+ else if (netif_is_lag_master(dev))
+ err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
+ else if (netif_is_bridge_master(dev))
+ err = mlxsw_sp_netdevice_bridge_event(dev, event, ptr);
+ else if (is_vlan_dev(dev))
+ err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
+
+ return notifier_from_errno(err);
}
static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
.notifier_call = mlxsw_sp_netdevice_event,
};
+static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
+ .notifier_call = mlxsw_sp_inetaddr_event,
+ .priority = 10, /* Must be called before FIB notifier block */
+};
+
+static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
+ .notifier_call = mlxsw_sp_router_netevent_event,
+};
+
static int __init mlxsw_sp_module_init(void)
{
int err;
register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
+ register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
+ register_netevent_notifier(&mlxsw_sp_router_netevent_nb);
+
err = mlxsw_core_driver_register(&mlxsw_sp_driver);
if (err)
goto err_core_driver_register;
return 0;
err_core_driver_register:
+ unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
+ unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
return err;
}
@@ -3250,6 +4569,8 @@ err_core_driver_register:
static void __exit mlxsw_sp_module_exit(void)
{
mlxsw_core_driver_unregister(&mlxsw_sp_driver);
+ unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
+ unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 4b8abaf06321..ac48abebe904 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -39,19 +39,22 @@
#include <linux/types.h>
#include <linux/netdevice.h>
+#include <linux/rhashtable.h>
#include <linux/bitops.h>
#include <linux/if_vlan.h>
#include <linux/list.h>
+#include <linux/dcbnl.h>
+#include <linux/in6.h>
#include <net/switchdev.h>
-#include <net/devlink.h>
#include "port.h"
#include "core.h"
#define MLXSW_SP_VFID_BASE VLAN_N_VID
-#define MLXSW_SP_VFID_PORT_MAX 512 /* Non-bridged VLAN interfaces */
-#define MLXSW_SP_VFID_BR_MAX 8192 /* Bridged VLAN interfaces */
-#define MLXSW_SP_VFID_MAX (MLXSW_SP_VFID_PORT_MAX + MLXSW_SP_VFID_BR_MAX)
+#define MLXSW_SP_VFID_MAX 6656 /* Bridged VLAN interfaces */
+
+#define MLXSW_SP_RFID_BASE 15360
+#define MLXSW_SP_RIF_MAX 800
#define MLXSW_SP_LAG_MAX 64
#define MLXSW_SP_PORT_PER_LAG_MAX 16
@@ -60,8 +63,36 @@
#define MLXSW_SP_PORTS_PER_CLUSTER_MAX 4
+#define MLXSW_SP_LPM_TREE_MIN 2 /* trees 0 and 1 are reserved */
+#define MLXSW_SP_LPM_TREE_MAX 22
+#define MLXSW_SP_LPM_TREE_COUNT (MLXSW_SP_LPM_TREE_MAX - MLXSW_SP_LPM_TREE_MIN)
+
+#define MLXSW_SP_VIRTUAL_ROUTER_MAX 256
+
#define MLXSW_SP_PORT_BASE_SPEED 25000 /* Mb/s */
+#define MLXSW_SP_BYTES_PER_CELL 96
+
+#define MLXSW_SP_BYTES_TO_CELLS(b) DIV_ROUND_UP(b, MLXSW_SP_BYTES_PER_CELL)
+#define MLXSW_SP_CELLS_TO_BYTES(c) ((c) * MLXSW_SP_BYTES_PER_CELL)
+
+#define MLXSW_SP_KVD_LINEAR_SIZE 65536 /* entries */
+#define MLXSW_SP_KVD_HASH_SINGLE_SIZE 163840 /* entries */
+#define MLXSW_SP_KVD_HASH_DOUBLE_SIZE 32768 /* entries */
+
+/* Maximum delay buffer needed in case of PAUSE frames, in cells.
+ * Assumes 100m cable and maximum MTU.
+ */
+#define MLXSW_SP_PAUSE_DELAY 612
+
+#define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */
+
+static inline u16 mlxsw_sp_pfc_delay_get(int mtu, u16 delay)
+{
+ delay = MLXSW_SP_BYTES_TO_CELLS(DIV_ROUND_UP(delay, BITS_PER_BYTE));
+ return MLXSW_SP_CELL_FACTOR * delay + MLXSW_SP_BYTES_TO_CELLS(mtu);
+}
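A worked example of the headroom arithmetic above (illustrative numbers, not taken from the patch): a PFC delay of 32768 bit times is DIV_ROUND_UP(32768, 8) = 4096 bytes, which MLXSW_SP_BYTES_TO_CELLS rounds up to 43 of the 96-byte cells; with a 1500-byte MTU (16 cells), mlxsw_sp_pfc_delay_get() then returns 2 * 43 + 16 = 102 cells of delay buffer.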
+
struct mlxsw_sp_port;
struct mlxsw_sp_upper {
@@ -69,12 +100,22 @@ struct mlxsw_sp_upper {
unsigned int ref_count;
};
-struct mlxsw_sp_vfid {
+struct mlxsw_sp_fid {
+ void (*leave)(struct mlxsw_sp_port *mlxsw_sp_vport);
struct list_head list;
- u16 nr_vports;
- u16 vfid; /* Starting at 0 */
- struct net_device *br_dev;
- u16 vid;
+ unsigned int ref_count;
+ struct net_device *dev;
+ struct mlxsw_sp_rif *r;
+ u16 fid;
+};
+
+struct mlxsw_sp_rif {
+ struct net_device *dev;
+ unsigned int ref_count;
+ struct mlxsw_sp_fid *f;
+ unsigned char addr[ETH_ALEN];
+ int mtu;
+ u16 rif;
};
struct mlxsw_sp_mid {
@@ -97,23 +138,144 @@ static inline u16 mlxsw_sp_fid_to_vfid(u16 fid)
static inline bool mlxsw_sp_fid_is_vfid(u16 fid)
{
- return fid >= MLXSW_SP_VFID_BASE;
+ return fid >= MLXSW_SP_VFID_BASE && fid < MLXSW_SP_RFID_BASE;
}
-struct mlxsw_sp {
+static inline bool mlxsw_sp_fid_is_rfid(u16 fid)
+{
+ return fid >= MLXSW_SP_RFID_BASE;
+}
+
+static inline u16 mlxsw_sp_rif_sp_to_fid(u16 rif)
+{
+ return MLXSW_SP_RFID_BASE + rif;
+}
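Taken together, the constants above partition the FID space: FIDs 0..4095 back VLANs in the VLAN-aware bridge, vFIDs occupy 4096..10751 (MLXSW_SP_VFID_BASE up to MLXSW_SP_VFID_BASE + MLXSW_SP_VFID_MAX - 1), and rFIDs start at MLXSW_SP_RFID_BASE, so mlxsw_sp_rif_sp_to_fid() maps router interfaces 0..799 onto FIDs 15360..16159.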
+
+struct mlxsw_sp_sb_pr {
+ enum mlxsw_reg_sbpr_mode mode;
+ u32 size;
+};
+
+struct mlxsw_cp_sb_occ {
+ u32 cur;
+ u32 max;
+};
+
+struct mlxsw_sp_sb_cm {
+ u32 min_buff;
+ u32 max_buff;
+ u8 pool;
+ struct mlxsw_cp_sb_occ occ;
+};
+
+struct mlxsw_sp_sb_pm {
+ u32 min_buff;
+ u32 max_buff;
+ struct mlxsw_cp_sb_occ occ;
+};
+
+#define MLXSW_SP_SB_POOL_COUNT 4
+#define MLXSW_SP_SB_TC_COUNT 8
+
+struct mlxsw_sp_sb {
+ struct mlxsw_sp_sb_pr prs[2][MLXSW_SP_SB_POOL_COUNT];
struct {
- struct list_head list;
- unsigned long mapped[BITS_TO_LONGS(MLXSW_SP_VFID_PORT_MAX)];
- } port_vfids;
+ struct mlxsw_sp_sb_cm cms[2][MLXSW_SP_SB_TC_COUNT];
+ struct mlxsw_sp_sb_pm pms[2][MLXSW_SP_SB_POOL_COUNT];
+ } ports[MLXSW_PORT_MAX_PORTS];
+};
+
+#define MLXSW_SP_PREFIX_COUNT (sizeof(struct in6_addr) * BITS_PER_BYTE)
+
+struct mlxsw_sp_prefix_usage {
+ DECLARE_BITMAP(b, MLXSW_SP_PREFIX_COUNT);
+};
+
+enum mlxsw_sp_l3proto {
+ MLXSW_SP_L3_PROTO_IPV4,
+ MLXSW_SP_L3_PROTO_IPV6,
+};
+
+struct mlxsw_sp_lpm_tree {
+ u8 id; /* tree ID */
+ unsigned int ref_count;
+ enum mlxsw_sp_l3proto proto;
+ struct mlxsw_sp_prefix_usage prefix_usage;
+};
+
+struct mlxsw_sp_fib;
+
+struct mlxsw_sp_vr {
+ u16 id; /* virtual router ID */
+ bool used;
+ enum mlxsw_sp_l3proto proto;
+ u32 tb_id; /* kernel fib table id */
+ struct mlxsw_sp_lpm_tree *lpm_tree;
+ struct mlxsw_sp_fib *fib;
+};
+
+enum mlxsw_sp_span_type {
+ MLXSW_SP_SPAN_EGRESS,
+ MLXSW_SP_SPAN_INGRESS
+};
+
+struct mlxsw_sp_span_inspected_port {
+ struct list_head list;
+ enum mlxsw_sp_span_type type;
+ u8 local_port;
+};
+
+struct mlxsw_sp_span_entry {
+ u8 local_port;
+ bool used;
+ struct list_head bound_ports_list;
+ int ref_count;
+ int id;
+};
+
+enum mlxsw_sp_port_mall_action_type {
+ MLXSW_SP_PORT_MALL_MIRROR,
+};
+
+struct mlxsw_sp_port_mall_mirror_tc_entry {
+ u8 to_local_port;
+ bool ingress;
+};
+
+struct mlxsw_sp_port_mall_tc_entry {
+ struct list_head list;
+ unsigned long cookie;
+ enum mlxsw_sp_port_mall_action_type type;
+ union {
+ struct mlxsw_sp_port_mall_mirror_tc_entry mirror;
+ };
+};
+
+struct mlxsw_sp_router {
+ struct mlxsw_sp_lpm_tree lpm_trees[MLXSW_SP_LPM_TREE_COUNT];
+ struct mlxsw_sp_vr vrs[MLXSW_SP_VIRTUAL_ROUTER_MAX];
+ struct rhashtable neigh_ht;
+ struct {
+ struct delayed_work dw;
+ unsigned long interval; /* ms */
+ } neighs_update;
+ struct delayed_work nexthop_probe_dw;
+#define MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL 5000 /* ms */
+ struct list_head nexthop_group_list;
+ struct list_head nexthop_neighs_list;
+};
+
+struct mlxsw_sp {
struct {
struct list_head list;
- unsigned long mapped[BITS_TO_LONGS(MLXSW_SP_VFID_BR_MAX)];
- } br_vfids;
+ DECLARE_BITMAP(mapped, MLXSW_SP_VFID_MAX);
+ } vfids;
struct {
struct list_head list;
- unsigned long mapped[BITS_TO_LONGS(MLXSW_SP_MID_MAX)];
+ DECLARE_BITMAP(mapped, MLXSW_SP_MID_MAX);
} br_mids;
- unsigned long active_fids[BITS_TO_LONGS(VLAN_N_VID)];
+ struct list_head fids; /* VLAN-aware bridge FIDs */
+ struct mlxsw_sp_rif *rifs[MLXSW_SP_RIF_MAX];
struct mlxsw_sp_port **ports;
struct mlxsw_core *core;
const struct mlxsw_bus_info *bus_info;
@@ -130,6 +292,16 @@ struct mlxsw_sp {
struct mlxsw_sp_upper master_bridge;
struct mlxsw_sp_upper lags[MLXSW_SP_LAG_MAX];
u8 port_to_module[MLXSW_PORT_MAX_PORTS];
+ struct mlxsw_sp_sb sb;
+ struct mlxsw_sp_router router;
+ struct {
+ DECLARE_BITMAP(usage, MLXSW_SP_KVD_LINEAR_SIZE);
+ } kvdl;
+
+ struct {
+ struct mlxsw_sp_span_entry *entries;
+ int entries_count;
+ } span;
};
static inline struct mlxsw_sp_upper *
@@ -148,6 +320,7 @@ struct mlxsw_sp_port_pcpu_stats {
};
struct mlxsw_sp_port {
+ struct mlxsw_core_port core_port; /* must be first */
struct net_device *dev;
struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats;
struct mlxsw_sp *mlxsw_sp;
@@ -163,17 +336,41 @@ struct mlxsw_sp_port {
u16 lag_id;
struct {
struct list_head list;
- struct mlxsw_sp_vfid *vfid;
+ struct mlxsw_sp_fid *f;
u16 vid;
} vport;
+ struct {
+ u8 tx_pause:1,
+ rx_pause:1;
+ } link;
+ struct {
+ struct ieee_ets *ets;
+ struct ieee_maxrate *maxrate;
+ struct ieee_pfc *pfc;
+ } dcb;
+ struct {
+ u8 module;
+ u8 width;
+ u8 lane;
+ } mapping;
/* 802.1Q bridge VLANs */
unsigned long *active_vlans;
unsigned long *untagged_vlans;
/* VLAN interfaces */
struct list_head vports_list;
- struct devlink_port devlink_port;
+ /* TC handles */
+ struct list_head mall_tc_list;
};
+struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
+void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);
+
+static inline bool
+mlxsw_sp_port_is_pause_en(const struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ return mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause;
+}
+
static inline struct mlxsw_sp_port *
mlxsw_sp_port_lagged_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id, u8 port_index)
{
@@ -186,28 +383,38 @@ mlxsw_sp_port_lagged_get(struct mlxsw_sp *mlxsw_sp, u16 lag_id, u8 port_index)
return mlxsw_sp_port && mlxsw_sp_port->lagged ? mlxsw_sp_port : NULL;
}
+static inline u16
+mlxsw_sp_vport_vid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
+{
+ return mlxsw_sp_vport->vport.vid;
+}
+
static inline bool
mlxsw_sp_port_is_vport(const struct mlxsw_sp_port *mlxsw_sp_port)
{
- return mlxsw_sp_port->vport.vfid;
+ u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_port);
+
+ return vid != 0;
}
-static inline struct net_device *
-mlxsw_sp_vport_br_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
+static inline void mlxsw_sp_vport_fid_set(struct mlxsw_sp_port *mlxsw_sp_vport,
+ struct mlxsw_sp_fid *f)
{
- return mlxsw_sp_vport->vport.vfid->br_dev;
+ mlxsw_sp_vport->vport.f = f;
}
-static inline u16
-mlxsw_sp_vport_vid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
+static inline struct mlxsw_sp_fid *
+mlxsw_sp_vport_fid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
- return mlxsw_sp_vport->vport.vid;
+ return mlxsw_sp_vport->vport.f;
}
-static inline u16
-mlxsw_sp_vport_vfid_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
+static inline struct net_device *
+mlxsw_sp_vport_dev_get(const struct mlxsw_sp_port *mlxsw_sp_vport)
{
- return mlxsw_sp_vport->vport.vfid->vfid;
+ struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
+
+ return f ? f->dev : NULL;
}
static inline struct mlxsw_sp_port *
@@ -225,27 +432,99 @@ mlxsw_sp_port_vport_find(const struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
}
static inline struct mlxsw_sp_port *
-mlxsw_sp_port_vport_find_by_vfid(const struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vfid)
+mlxsw_sp_port_vport_find_by_fid(const struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 fid)
{
struct mlxsw_sp_port *mlxsw_sp_vport;
list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
vport.list) {
- if (mlxsw_sp_vport_vfid_get(mlxsw_sp_vport) == vfid)
+ struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_vport);
+
+ if (f && f->fid == fid)
return mlxsw_sp_vport;
}
return NULL;
}
+static inline struct mlxsw_sp_fid *mlxsw_sp_fid_find(struct mlxsw_sp *mlxsw_sp,
+ u16 fid)
+{
+ struct mlxsw_sp_fid *f;
+
+ list_for_each_entry(f, &mlxsw_sp->fids, list)
+ if (f->fid == fid)
+ return f;
+
+ return NULL;
+}
+
+static inline struct mlxsw_sp_fid *
+mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *br_dev)
+{
+ struct mlxsw_sp_fid *f;
+
+ list_for_each_entry(f, &mlxsw_sp->vfids.list, list)
+ if (f->dev == br_dev)
+ return f;
+
+ return NULL;
+}
+
+static inline struct mlxsw_sp_rif *
+mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *dev)
+{
+ int i;
+
+ for (i = 0; i < MLXSW_SP_RIF_MAX; i++)
+ if (mlxsw_sp->rifs[i] && mlxsw_sp->rifs[i]->dev == dev)
+ return mlxsw_sp->rifs[i];
+
+ return NULL;
+}
+
enum mlxsw_sp_flood_table {
MLXSW_SP_FLOOD_TABLE_UC,
MLXSW_SP_FLOOD_TABLE_BM,
};
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port);
+int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index, u16 pool_index,
+ struct devlink_sb_pool_info *pool_info);
+int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index, u16 pool_index, u32 size,
+ enum devlink_sb_threshold_type threshold_type);
+int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_threshold);
+int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 threshold);
+int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 *p_pool_index, u32 *p_threshold);
+int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 pool_index, u32 threshold);
+int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index);
+int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index);
+int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_cur, u32 *p_max);
+int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u32 *p_cur, u32 *p_max);
int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp);
@@ -257,13 +536,61 @@ int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid);
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
u16 vid_end, bool is_member, bool untagged);
-int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
- u16 vid);
-int mlxsw_sp_port_kill_vid(struct net_device *dev,
- __be16 __always_unused proto, u16 vid);
-int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid,
- bool set, bool only_uc);
+int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
+ bool set);
void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
+int mlxsw_sp_port_fdb_flush(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid);
+int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
+ bool adding);
+struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid);
+void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f);
+void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *r);
+int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
+ bool dwrr, u8 dwrr_weight);
+int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u8 switch_prio, u8 tclass);
+int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
+ u8 *prio_tc, bool pause_en,
+ struct ieee_pfc *my_pfc);
+int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ enum mlxsw_reg_qeec_hr hr, u8 index,
+ u8 next_index, u32 maxrate);
+
+#ifdef CONFIG_MLXSW_SPECTRUM_DCB
+
+int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port);
+void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port);
+
+#else
+
+static inline int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ return 0;
+}
+
+static inline void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{}
+
+#endif
+
+int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_router_fib4_add(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct switchdev_obj_ipv4_fib *fib4,
+ struct switchdev_trans *trans);
+int mlxsw_sp_router_fib4_del(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct switchdev_obj_ipv4_fib *fib4);
+int mlxsw_sp_router_neigh_construct(struct net_device *dev,
+ struct neighbour *n);
+void mlxsw_sp_router_neigh_destroy(struct net_device *dev,
+ struct neighbour *n);
+int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
+ unsigned long event, void *ptr);
+
+int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count);
+void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
#endif
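The mlxsw_sp_sb_* prototypes above form the driver side of the devlink shared-buffer API: pool sizes and thresholds come in through the *_set() entry points, and the occupancy snapshot/clear pair feeds the watermark readouts. User space reaches them through the devlink tool, for example "devlink sb pool show" or "devlink sb occupancy snapshot <dev>" (invocations shown for orientation only).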
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index d59195e3f7fb..953b214f38d0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -34,36 +34,140 @@
#include <linux/kernel.h>
#include <linux/types.h>
+#include <linux/dcbnl.h>
+#include <linux/if_ether.h>
+#include <linux/list.h>
#include "spectrum.h"
#include "core.h"
#include "port.h"
#include "reg.h"
-struct mlxsw_sp_pb {
- u8 index;
- u16 size;
-};
+static struct mlxsw_sp_sb_pr *mlxsw_sp_sb_pr_get(struct mlxsw_sp *mlxsw_sp,
+ u8 pool,
+ enum mlxsw_reg_sbxx_dir dir)
+{
+ return &mlxsw_sp->sb.prs[dir][pool];
+}
-#define MLXSW_SP_PB(_index, _size) \
- { \
- .index = _index, \
- .size = _size, \
+static struct mlxsw_sp_sb_cm *mlxsw_sp_sb_cm_get(struct mlxsw_sp *mlxsw_sp,
+ u8 local_port, u8 pg_buff,
+ enum mlxsw_reg_sbxx_dir dir)
+{
+ return &mlxsw_sp->sb.ports[local_port].cms[dir][pg_buff];
+}
+
+static struct mlxsw_sp_sb_pm *mlxsw_sp_sb_pm_get(struct mlxsw_sp *mlxsw_sp,
+ u8 local_port, u8 pool,
+ enum mlxsw_reg_sbxx_dir dir)
+{
+ return &mlxsw_sp->sb.ports[local_port].pms[dir][pool];
+}
+
+static int mlxsw_sp_sb_pr_write(struct mlxsw_sp *mlxsw_sp, u8 pool,
+ enum mlxsw_reg_sbxx_dir dir,
+ enum mlxsw_reg_sbpr_mode mode, u32 size)
+{
+ char sbpr_pl[MLXSW_REG_SBPR_LEN];
+ struct mlxsw_sp_sb_pr *pr;
+ int err;
+
+ mlxsw_reg_sbpr_pack(sbpr_pl, pool, dir, mode, size);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
+ if (err)
+ return err;
+
+ pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
+ pr->mode = mode;
+ pr->size = size;
+ return 0;
+}
+
+static int mlxsw_sp_sb_cm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ u8 pg_buff, enum mlxsw_reg_sbxx_dir dir,
+ u32 min_buff, u32 max_buff, u8 pool)
+{
+ char sbcm_pl[MLXSW_REG_SBCM_LEN];
+ int err;
+
+ mlxsw_reg_sbcm_pack(sbcm_pl, local_port, pg_buff, dir,
+ min_buff, max_buff, pool);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
+ if (err)
+ return err;
+ if (pg_buff < MLXSW_SP_SB_TC_COUNT) {
+ struct mlxsw_sp_sb_cm *cm;
+
+ cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, pg_buff, dir);
+ cm->min_buff = min_buff;
+ cm->max_buff = max_buff;
+ cm->pool = pool;
}
+ return 0;
+}
+
+static int mlxsw_sp_sb_pm_write(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ u8 pool, enum mlxsw_reg_sbxx_dir dir,
+ u32 min_buff, u32 max_buff)
+{
+ char sbpm_pl[MLXSW_REG_SBPM_LEN];
+ struct mlxsw_sp_sb_pm *pm;
+ int err;
+
+ mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false,
+ min_buff, max_buff);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl);
+ if (err)
+ return err;
+
+ pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir);
+ pm->min_buff = min_buff;
+ pm->max_buff = max_buff;
+ return 0;
+}
+
+static int mlxsw_sp_sb_pm_occ_clear(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ u8 pool, enum mlxsw_reg_sbxx_dir dir,
+ struct list_head *bulk_list)
+{
+ char sbpm_pl[MLXSW_REG_SBPM_LEN];
+
+ mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, true, 0, 0);
+ return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
+ bulk_list, NULL, 0);
+}
+
+static void mlxsw_sp_sb_pm_occ_query_cb(struct mlxsw_core *mlxsw_core,
+ char *sbpm_pl, size_t sbpm_pl_len,
+ unsigned long cb_priv)
+{
+ struct mlxsw_sp_sb_pm *pm = (struct mlxsw_sp_sb_pm *) cb_priv;
+
+ mlxsw_reg_sbpm_unpack(sbpm_pl, &pm->occ.cur, &pm->occ.max);
+}
+
+static int mlxsw_sp_sb_pm_occ_query(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ u8 pool, enum mlxsw_reg_sbxx_dir dir,
+ struct list_head *bulk_list)
+{
+ char sbpm_pl[MLXSW_REG_SBPM_LEN];
+ struct mlxsw_sp_sb_pm *pm;
+
+ pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port, pool, dir);
+ mlxsw_reg_sbpm_pack(sbpm_pl, local_port, pool, dir, false, 0, 0);
+ return mlxsw_reg_trans_query(mlxsw_sp->core, MLXSW_REG(sbpm), sbpm_pl,
+ bulk_list,
+ mlxsw_sp_sb_pm_occ_query_cb,
+ (unsigned long) pm);
+}
-static const struct mlxsw_sp_pb mlxsw_sp_pbs[] = {
- MLXSW_SP_PB(0, 208),
- MLXSW_SP_PB(1, 208),
- MLXSW_SP_PB(2, 208),
- MLXSW_SP_PB(3, 208),
- MLXSW_SP_PB(4, 208),
- MLXSW_SP_PB(5, 208),
- MLXSW_SP_PB(6, 208),
- MLXSW_SP_PB(7, 208),
- MLXSW_SP_PB(9, 208),
+static const u16 mlxsw_sp_pbs[] = {
+ [0] = 2 * MLXSW_SP_BYTES_TO_CELLS(ETH_FRAME_LEN),
+ [9] = 2 * MLXSW_SP_BYTES_TO_CELLS(MLXSW_PORT_MAX_MTU),
};
#define MLXSW_SP_PBS_LEN ARRAY_SIZE(mlxsw_sp_pbs)
+#define MLXSW_SP_PB_UNUSED 8
static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
@@ -73,194 +177,206 @@ static int mlxsw_sp_port_pb_init(struct mlxsw_sp_port *mlxsw_sp_port)
mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port,
0xffff, 0xffff / 2);
for (i = 0; i < MLXSW_SP_PBS_LEN; i++) {
- const struct mlxsw_sp_pb *pb;
-
- pb = &mlxsw_sp_pbs[i];
- mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pb->index, pb->size);
+ if (i == MLXSW_SP_PB_UNUSED)
+ continue;
+ mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, i, mlxsw_sp_pbs[i]);
}
+ mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl,
+ MLXSW_REG_PBMC_PORT_SHARED_BUF_IDX, 0);
return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core,
MLXSW_REG(pbmc), pbmc_pl);
}
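Note that the rewritten mlxsw_sp_pbs table relies on C designated initializers: only lossy buffers 0 (sized for two maximum-length Ethernet frames) and 9 (two maximum-MTU frames) get non-zero sizes, entries 1..7 default to zero, and the loop above skips index 8 (MLXSW_SP_PB_UNUSED) because that buffer does not exist in the device.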
-#define MLXSW_SP_SB_BYTES_PER_CELL 96
+static int mlxsw_sp_port_pb_prio_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ char pptb_pl[MLXSW_REG_PPTB_LEN];
+ int i;
-struct mlxsw_sp_sb_pool {
- u8 pool;
- enum mlxsw_reg_sbpr_dir dir;
- enum mlxsw_reg_sbpr_mode mode;
- u32 size;
-};
+ mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, 0);
+ return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
+ pptb_pl);
+}
+
+static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ int err;
+
+ err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
+ if (err)
+ return err;
+ return mlxsw_sp_port_pb_prio_init(mlxsw_sp_port);
+}
-#define MLXSW_SP_SB_POOL_INGRESS_SIZE \
- ((15000000 - (2 * 20000 * MLXSW_PORT_MAX_PORTS)) / \
- MLXSW_SP_SB_BYTES_PER_CELL)
-#define MLXSW_SP_SB_POOL_EGRESS_SIZE \
- ((14000000 - (8 * 1500 * MLXSW_PORT_MAX_PORTS)) / \
- MLXSW_SP_SB_BYTES_PER_CELL)
-
-#define MLXSW_SP_SB_POOL(_pool, _dir, _mode, _size) \
- { \
- .pool = _pool, \
- .dir = _dir, \
- .mode = _mode, \
- .size = _size, \
+#define MLXSW_SP_SB_PR_INGRESS_SIZE \
+ (15000000 - (2 * 20000 * MLXSW_PORT_MAX_PORTS))
+#define MLXSW_SP_SB_PR_INGRESS_MNG_SIZE (200 * 1000)
+#define MLXSW_SP_SB_PR_EGRESS_SIZE \
+ (14000000 - (8 * 1500 * MLXSW_PORT_MAX_PORTS))
+
+#define MLXSW_SP_SB_PR(_mode, _size) \
+ { \
+ .mode = _mode, \
+ .size = _size, \
}
-#define MLXSW_SP_SB_POOL_INGRESS(_pool, _size) \
- MLXSW_SP_SB_POOL(_pool, MLXSW_REG_SBPR_DIR_INGRESS, \
- MLXSW_REG_SBPR_MODE_DYNAMIC, _size)
-
-#define MLXSW_SP_SB_POOL_EGRESS(_pool, _size) \
- MLXSW_SP_SB_POOL(_pool, MLXSW_REG_SBPR_DIR_EGRESS, \
- MLXSW_REG_SBPR_MODE_DYNAMIC, _size)
-
-static const struct mlxsw_sp_sb_pool mlxsw_sp_sb_pools[] = {
- MLXSW_SP_SB_POOL_INGRESS(0, MLXSW_SP_SB_POOL_INGRESS_SIZE),
- MLXSW_SP_SB_POOL_INGRESS(1, 0),
- MLXSW_SP_SB_POOL_INGRESS(2, 0),
- MLXSW_SP_SB_POOL_INGRESS(3, 0),
- MLXSW_SP_SB_POOL_EGRESS(0, MLXSW_SP_SB_POOL_EGRESS_SIZE),
- MLXSW_SP_SB_POOL_EGRESS(1, 0),
- MLXSW_SP_SB_POOL_EGRESS(2, 0),
- MLXSW_SP_SB_POOL_EGRESS(2, MLXSW_SP_SB_POOL_EGRESS_SIZE),
+static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_ingress[] = {
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
+ MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_INGRESS_SIZE)),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
+ MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_INGRESS_MNG_SIZE)),
};
-#define MLXSW_SP_SB_POOLS_LEN ARRAY_SIZE(mlxsw_sp_sb_pools)
+#define MLXSW_SP_SB_PRS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_ingress)
+
+static const struct mlxsw_sp_sb_pr mlxsw_sp_sb_prs_egress[] = {
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC,
+ MLXSW_SP_BYTES_TO_CELLS(MLXSW_SP_SB_PR_EGRESS_SIZE)),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_DYNAMIC, 0),
+};
-static int mlxsw_sp_sb_pools_init(struct mlxsw_sp *mlxsw_sp)
+#define MLXSW_SP_SB_PRS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_prs_egress)
+
+static int __mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_reg_sbxx_dir dir,
+ const struct mlxsw_sp_sb_pr *prs,
+ size_t prs_len)
{
- char sbpr_pl[MLXSW_REG_SBPR_LEN];
int i;
int err;
- for (i = 0; i < MLXSW_SP_SB_POOLS_LEN; i++) {
- const struct mlxsw_sp_sb_pool *pool;
+ for (i = 0; i < prs_len; i++) {
+ const struct mlxsw_sp_sb_pr *pr;
- pool = &mlxsw_sp_sb_pools[i];
- mlxsw_reg_sbpr_pack(sbpr_pl, pool->pool, pool->dir,
- pool->mode, pool->size);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbpr), sbpr_pl);
+ pr = &prs[i];
+ err = mlxsw_sp_sb_pr_write(mlxsw_sp, i, dir,
+ pr->mode, pr->size);
if (err)
return err;
}
return 0;
}
-struct mlxsw_sp_sb_cm {
- union {
- u8 pg;
- u8 tc;
- } u;
- enum mlxsw_reg_sbcm_dir dir;
- u32 min_buff;
- u32 max_buff;
- u8 pool;
-};
+static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int err;
-#define MLXSW_SP_SB_CM(_pg_tc, _dir, _min_buff, _max_buff, _pool) \
- { \
- .u.pg = _pg_tc, \
- .dir = _dir, \
- .min_buff = _min_buff, \
- .max_buff = _max_buff, \
- .pool = _pool, \
+ err = __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_INGRESS,
+ mlxsw_sp_sb_prs_ingress,
+ MLXSW_SP_SB_PRS_INGRESS_LEN);
+ if (err)
+ return err;
+ return __mlxsw_sp_sb_prs_init(mlxsw_sp, MLXSW_REG_SBXX_DIR_EGRESS,
+ mlxsw_sp_sb_prs_egress,
+ MLXSW_SP_SB_PRS_EGRESS_LEN);
+}
+
+#define MLXSW_SP_SB_CM(_min_buff, _max_buff, _pool) \
+ { \
+ .min_buff = _min_buff, \
+ .max_buff = _max_buff, \
+ .pool = _pool, \
}
-#define MLXSW_SP_SB_CM_INGRESS(_pg, _min_buff, _max_buff) \
- MLXSW_SP_SB_CM(_pg, MLXSW_REG_SBCM_DIR_INGRESS, \
- _min_buff, _max_buff, 0)
-
-#define MLXSW_SP_SB_CM_EGRESS(_tc, _min_buff, _max_buff) \
- MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBCM_DIR_EGRESS, \
- _min_buff, _max_buff, 0)
-
-#define MLXSW_SP_CPU_PORT_SB_CM_EGRESS(_tc) \
- MLXSW_SP_SB_CM(_tc, MLXSW_REG_SBCM_DIR_EGRESS, 104, 2, 3)
-
-static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms[] = {
- MLXSW_SP_SB_CM_INGRESS(0, 10000 / MLXSW_SP_SB_BYTES_PER_CELL, 8),
- MLXSW_SP_SB_CM_INGRESS(1, 0, 0),
- MLXSW_SP_SB_CM_INGRESS(2, 0, 0),
- MLXSW_SP_SB_CM_INGRESS(3, 0, 0),
- MLXSW_SP_SB_CM_INGRESS(4, 0, 0),
- MLXSW_SP_SB_CM_INGRESS(5, 0, 0),
- MLXSW_SP_SB_CM_INGRESS(6, 0, 0),
- MLXSW_SP_SB_CM_INGRESS(7, 0, 0),
- MLXSW_SP_SB_CM_INGRESS(9, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff),
- MLXSW_SP_SB_CM_EGRESS(0, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
- MLXSW_SP_SB_CM_EGRESS(1, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
- MLXSW_SP_SB_CM_EGRESS(2, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
- MLXSW_SP_SB_CM_EGRESS(3, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
- MLXSW_SP_SB_CM_EGRESS(4, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
- MLXSW_SP_SB_CM_EGRESS(5, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
- MLXSW_SP_SB_CM_EGRESS(6, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
- MLXSW_SP_SB_CM_EGRESS(7, 1500 / MLXSW_SP_SB_BYTES_PER_CELL, 9),
- MLXSW_SP_SB_CM_EGRESS(8, 0, 0),
- MLXSW_SP_SB_CM_EGRESS(9, 0, 0),
- MLXSW_SP_SB_CM_EGRESS(10, 0, 0),
- MLXSW_SP_SB_CM_EGRESS(11, 0, 0),
- MLXSW_SP_SB_CM_EGRESS(12, 0, 0),
- MLXSW_SP_SB_CM_EGRESS(13, 0, 0),
- MLXSW_SP_SB_CM_EGRESS(14, 0, 0),
- MLXSW_SP_SB_CM_EGRESS(15, 0, 0),
- MLXSW_SP_SB_CM_EGRESS(16, 1, 0xff),
+static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_ingress[] = {
+ MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 8, 0),
+ MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+ MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+ MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+ MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+ MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+ MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+ MLXSW_SP_SB_CM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN, 0),
+ MLXSW_SP_SB_CM(0, 0, 0), /* dummy, this PG does not exist */
+ MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(20000), 1, 3),
};
-#define MLXSW_SP_SB_CMS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms)
+#define MLXSW_SP_SB_CMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_ingress)
+
+static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
+ MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+ MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+ MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+ MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+ MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+ MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+ MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+ MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(1500), 9, 0),
+ MLXSW_SP_SB_CM(0, 0, 0),
+ MLXSW_SP_SB_CM(0, 0, 0),
+ MLXSW_SP_SB_CM(0, 0, 0),
+ MLXSW_SP_SB_CM(0, 0, 0),
+ MLXSW_SP_SB_CM(0, 0, 0),
+ MLXSW_SP_SB_CM(0, 0, 0),
+ MLXSW_SP_SB_CM(0, 0, 0),
+ MLXSW_SP_SB_CM(0, 0, 0),
+ MLXSW_SP_SB_CM(1, 0xff, 0),
+};
+
+#define MLXSW_SP_SB_CMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_cms_egress)
+
+#define MLXSW_SP_CPU_PORT_SB_CM MLXSW_SP_SB_CM(0, 0, 0)
static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(0),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(1),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(2),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(3),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(4),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(5),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(6),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(7),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(8),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(9),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(10),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(11),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(12),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(13),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(14),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(15),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(16),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(17),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(18),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(19),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(20),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(21),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(22),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(23),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(24),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(25),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(26),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(27),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(28),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(29),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(30),
- MLXSW_SP_CPU_PORT_SB_CM_EGRESS(31),
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 0, 0),
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_CPU_PORT_SB_CM,
};
#define MLXSW_SP_CPU_PORT_SB_MCS_LEN \
ARRAY_SIZE(mlxsw_sp_cpu_port_sb_cms)
-static int mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- const struct mlxsw_sp_sb_cm *cms,
- size_t cms_len)
+static int __mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ enum mlxsw_reg_sbxx_dir dir,
+ const struct mlxsw_sp_sb_cm *cms,
+ size_t cms_len)
{
- char sbcm_pl[MLXSW_REG_SBCM_LEN];
int i;
int err;
for (i = 0; i < cms_len; i++) {
const struct mlxsw_sp_sb_cm *cm;
+ if (i == 8 && dir == MLXSW_REG_SBXX_DIR_INGRESS)
+ continue; /* PG number 8 does not exist, skip it */
cm = &cms[i];
- mlxsw_reg_sbcm_pack(sbcm_pl, local_port, cm->u.pg, cm->dir,
- cm->min_buff, cm->max_buff, cm->pool);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbcm), sbcm_pl);
+ err = mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, i, dir,
+ cm->min_buff, cm->max_buff,
+ cm->pool);
if (err)
return err;
}
@@ -269,105 +385,120 @@ static int mlxsw_sp_sb_cms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
static int mlxsw_sp_port_sb_cms_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
- return mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
- mlxsw_sp_port->local_port, mlxsw_sp_sb_cms,
- MLXSW_SP_SB_CMS_LEN);
+ int err;
+
+ err = __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
+ mlxsw_sp_port->local_port,
+ MLXSW_REG_SBXX_DIR_INGRESS,
+ mlxsw_sp_sb_cms_ingress,
+ MLXSW_SP_SB_CMS_INGRESS_LEN);
+ if (err)
+ return err;
+ return __mlxsw_sp_sb_cms_init(mlxsw_sp_port->mlxsw_sp,
+ mlxsw_sp_port->local_port,
+ MLXSW_REG_SBXX_DIR_EGRESS,
+ mlxsw_sp_sb_cms_egress,
+ MLXSW_SP_SB_CMS_EGRESS_LEN);
}
static int mlxsw_sp_cpu_port_sb_cms_init(struct mlxsw_sp *mlxsw_sp)
{
- return mlxsw_sp_sb_cms_init(mlxsw_sp, 0, mlxsw_sp_cpu_port_sb_cms,
- MLXSW_SP_CPU_PORT_SB_MCS_LEN);
+ return __mlxsw_sp_sb_cms_init(mlxsw_sp, 0, MLXSW_REG_SBXX_DIR_EGRESS,
+ mlxsw_sp_cpu_port_sb_cms,
+ MLXSW_SP_CPU_PORT_SB_MCS_LEN);
}
-struct mlxsw_sp_sb_pm {
- u8 pool;
- enum mlxsw_reg_sbpm_dir dir;
- u32 min_buff;
- u32 max_buff;
+#define MLXSW_SP_SB_PM(_min_buff, _max_buff) \
+ { \
+ .min_buff = _min_buff, \
+ .max_buff = _max_buff, \
+ }
+
+static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_ingress[] = {
+ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
+ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX),
};
-#define MLXSW_SP_SB_PM(_pool, _dir, _min_buff, _max_buff) \
- { \
- .pool = _pool, \
- .dir = _dir, \
- .min_buff = _min_buff, \
- .max_buff = _max_buff, \
- }
+#define MLXSW_SP_SB_PMS_INGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_ingress)
-#define MLXSW_SP_SB_PM_INGRESS(_pool, _min_buff, _max_buff) \
- MLXSW_SP_SB_PM(_pool, MLXSW_REG_SBPM_DIR_INGRESS, \
- _min_buff, _max_buff)
-
-#define MLXSW_SP_SB_PM_EGRESS(_pool, _min_buff, _max_buff) \
- MLXSW_SP_SB_PM(_pool, MLXSW_REG_SBPM_DIR_EGRESS, \
- _min_buff, _max_buff)
-
-static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms[] = {
- MLXSW_SP_SB_PM_INGRESS(0, 0, 0xff),
- MLXSW_SP_SB_PM_INGRESS(1, 0, 0),
- MLXSW_SP_SB_PM_INGRESS(2, 0, 0),
- MLXSW_SP_SB_PM_INGRESS(3, 0, 0),
- MLXSW_SP_SB_PM_EGRESS(0, 0, 7),
- MLXSW_SP_SB_PM_EGRESS(1, 0, 0),
- MLXSW_SP_SB_PM_EGRESS(2, 0, 0),
- MLXSW_SP_SB_PM_EGRESS(3, 0, 0),
+static const struct mlxsw_sp_sb_pm mlxsw_sp_sb_pms_egress[] = {
+ MLXSW_SP_SB_PM(0, 7),
+ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
+ MLXSW_SP_SB_PM(0, MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN),
};
-#define MLXSW_SP_SB_PMS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms)
+#define MLXSW_SP_SB_PMS_EGRESS_LEN ARRAY_SIZE(mlxsw_sp_sb_pms_egress)
-static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
+static int __mlxsw_sp_port_sb_pms_init(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ enum mlxsw_reg_sbxx_dir dir,
+ const struct mlxsw_sp_sb_pm *pms,
+ size_t pms_len)
{
- char sbpm_pl[MLXSW_REG_SBPM_LEN];
int i;
int err;
- for (i = 0; i < MLXSW_SP_SB_PMS_LEN; i++) {
+ for (i = 0; i < pms_len; i++) {
const struct mlxsw_sp_sb_pm *pm;
- pm = &mlxsw_sp_sb_pms[i];
- mlxsw_reg_sbpm_pack(sbpm_pl, mlxsw_sp_port->local_port,
- pm->pool, pm->dir,
- pm->min_buff, pm->max_buff);
- err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core,
- MLXSW_REG(sbpm), sbpm_pl);
+ pm = &pms[i];
+ err = mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, i, dir,
+ pm->min_buff, pm->max_buff);
if (err)
return err;
}
return 0;
}
+static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ int err;
+
+ err = __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp,
+ mlxsw_sp_port->local_port,
+ MLXSW_REG_SBXX_DIR_INGRESS,
+ mlxsw_sp_sb_pms_ingress,
+ MLXSW_SP_SB_PMS_INGRESS_LEN);
+ if (err)
+ return err;
+ return __mlxsw_sp_port_sb_pms_init(mlxsw_sp_port->mlxsw_sp,
+ mlxsw_sp_port->local_port,
+ MLXSW_REG_SBXX_DIR_EGRESS,
+ mlxsw_sp_sb_pms_egress,
+ MLXSW_SP_SB_PMS_EGRESS_LEN);
+}
+
struct mlxsw_sp_sb_mm {
- u8 prio;
u32 min_buff;
u32 max_buff;
u8 pool;
};
-#define MLXSW_SP_SB_MM(_prio, _min_buff, _max_buff, _pool) \
- { \
- .prio = _prio, \
- .min_buff = _min_buff, \
- .max_buff = _max_buff, \
- .pool = _pool, \
+#define MLXSW_SP_SB_MM(_min_buff, _max_buff, _pool) \
+ { \
+ .min_buff = _min_buff, \
+ .max_buff = _max_buff, \
+ .pool = _pool, \
}
static const struct mlxsw_sp_sb_mm mlxsw_sp_sb_mms[] = {
- MLXSW_SP_SB_MM(0, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(1, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(2, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(3, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(4, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(5, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(6, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(7, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(8, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(9, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(10, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(11, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(12, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(13, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
- MLXSW_SP_SB_MM(14, 20000 / MLXSW_SP_SB_BYTES_PER_CELL, 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
+ MLXSW_SP_SB_MM(MLXSW_SP_BYTES_TO_CELLS(20000), 0xff, 0),
};
#define MLXSW_SP_SB_MMS_LEN ARRAY_SIZE(mlxsw_sp_sb_mms)
@@ -382,7 +513,7 @@ static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
const struct mlxsw_sp_sb_mm *mc;
mc = &mlxsw_sp_sb_mms[i];
- mlxsw_reg_sbmm_pack(sbmm_pl, mc->prio, mc->min_buff,
+ mlxsw_reg_sbmm_pack(sbmm_pl, i, mc->min_buff,
mc->max_buff, mc->pool);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbmm), sbmm_pl);
if (err)
@@ -391,26 +522,39 @@ static int mlxsw_sp_sb_mms_init(struct mlxsw_sp *mlxsw_sp)
return 0;
}
+#define MLXSW_SP_SB_SIZE (16 * 1024 * 1024)
+
int mlxsw_sp_buffers_init(struct mlxsw_sp *mlxsw_sp)
{
int err;
- err = mlxsw_sp_sb_pools_init(mlxsw_sp);
+ err = mlxsw_sp_sb_prs_init(mlxsw_sp);
if (err)
return err;
err = mlxsw_sp_cpu_port_sb_cms_init(mlxsw_sp);
if (err)
return err;
err = mlxsw_sp_sb_mms_init(mlxsw_sp);
+ if (err)
+ return err;
+ return devlink_sb_register(priv_to_devlink(mlxsw_sp->core), 0,
+ MLXSW_SP_SB_SIZE,
+ MLXSW_SP_SB_POOL_COUNT,
+ MLXSW_SP_SB_POOL_COUNT,
+ MLXSW_SP_SB_TC_COUNT,
+ MLXSW_SP_SB_TC_COUNT);
+}
- return err;
+void mlxsw_sp_buffers_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ devlink_sb_unregister(priv_to_devlink(mlxsw_sp->core), 0);
}
int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
int err;
- err = mlxsw_sp_port_pb_init(mlxsw_sp_port);
+ err = mlxsw_sp_port_headroom_init(mlxsw_sp_port);
if (err)
return err;
err = mlxsw_sp_port_sb_cms_init(mlxsw_sp_port);
@@ -420,3 +564,390 @@ int mlxsw_sp_port_buffers_init(struct mlxsw_sp_port *mlxsw_sp_port)
return err;
}
+
+static u8 pool_get(u16 pool_index)
+{
+ return pool_index % MLXSW_SP_SB_POOL_COUNT;
+}
+
+static u16 pool_index_get(u8 pool, enum mlxsw_reg_sbxx_dir dir)
+{
+ u16 pool_index;
+
+ pool_index = pool;
+ if (dir == MLXSW_REG_SBXX_DIR_EGRESS)
+ pool_index += MLXSW_SP_SB_POOL_COUNT;
+ return pool_index;
+}
+
+static enum mlxsw_reg_sbxx_dir dir_get(u16 pool_index)
+{
+ return pool_index < MLXSW_SP_SB_POOL_COUNT ?
+ MLXSW_REG_SBXX_DIR_INGRESS : MLXSW_REG_SBXX_DIR_EGRESS;
+}
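These helpers fold direction into the devlink pool index: with MLXSW_SP_SB_POOL_COUNT = 4, indices 0..3 are the four ingress pools and 4..7 the four egress pools. For example, pool_index 5 decodes to pool_get(5) = 1 with dir_get(5) = MLXSW_REG_SBXX_DIR_EGRESS, and pool_index_get(1, MLXSW_REG_SBXX_DIR_EGRESS) encodes it back to 5.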
+
+int mlxsw_sp_sb_pool_get(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index, u16 pool_index,
+ struct devlink_sb_pool_info *pool_info)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ u8 pool = pool_get(pool_index);
+ enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
+ struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
+
+ pool_info->pool_type = dir;
+ pool_info->size = MLXSW_SP_CELLS_TO_BYTES(pr->size);
+ pool_info->threshold_type = pr->mode;
+ return 0;
+}
+
+int mlxsw_sp_sb_pool_set(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index, u16 pool_index, u32 size,
+ enum devlink_sb_threshold_type threshold_type)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ u8 pool = pool_get(pool_index);
+ enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
+ enum mlxsw_reg_sbpr_mode mode = threshold_type;
+ u32 pool_size = MLXSW_SP_BYTES_TO_CELLS(size);
+
+ return mlxsw_sp_sb_pr_write(mlxsw_sp, pool, dir, mode, pool_size);
+}
+
+#define MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET (-2) /* 3->1, 16->14 */
+
+static u32 mlxsw_sp_sb_threshold_out(struct mlxsw_sp *mlxsw_sp, u8 pool,
+ enum mlxsw_reg_sbxx_dir dir, u32 max_buff)
+{
+ struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
+
+ if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC)
+ return max_buff - MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
+ return MLXSW_SP_CELLS_TO_BYTES(max_buff);
+}
+
+static int mlxsw_sp_sb_threshold_in(struct mlxsw_sp *mlxsw_sp, u8 pool,
+ enum mlxsw_reg_sbxx_dir dir, u32 threshold,
+ u32 *p_max_buff)
+{
+ struct mlxsw_sp_sb_pr *pr = mlxsw_sp_sb_pr_get(mlxsw_sp, pool, dir);
+
+ if (pr->mode == MLXSW_REG_SBPR_MODE_DYNAMIC) {
+ int val;
+
+ val = threshold + MLXSW_SP_SB_THRESHOLD_TO_ALPHA_OFFSET;
+ if (val < MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN ||
+ val > MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX)
+ return -EINVAL;
+ *p_max_buff = val;
+ } else {
+ *p_max_buff = MLXSW_SP_BYTES_TO_CELLS(threshold);
+ }
+ return 0;
+}
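The (-2) offset converts between the devlink threshold space and the hardware's dynamic-alpha codes: for a dynamic pool, a devlink threshold of 3 becomes max_buff 3 + (-2) = 1 (MLXSW_REG_SBXX_DYN_MAX_BUFF_MIN) and 16 becomes 14 (MLXSW_REG_SBXX_DYN_MAX_BUFF_MAX), with mlxsw_sp_sb_threshold_out() inverting the mapping; for static pools the threshold is simply translated between bytes and 96-byte cells.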
+
+int mlxsw_sp_sb_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_threshold)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port =
+ mlxsw_core_port_driver_priv(mlxsw_core_port);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 local_port = mlxsw_sp_port->local_port;
+ u8 pool = pool_get(pool_index);
+ enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
+ struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
+ pool, dir);
+
+ *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, pool, dir,
+ pm->max_buff);
+ return 0;
+}
+
+int mlxsw_sp_sb_port_pool_set(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 threshold)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port =
+ mlxsw_core_port_driver_priv(mlxsw_core_port);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 local_port = mlxsw_sp_port->local_port;
+ u8 pool = pool_get(pool_index);
+ enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
+ u32 max_buff;
+ int err;
+
+ err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
+ threshold, &max_buff);
+ if (err)
+ return err;
+
+ return mlxsw_sp_sb_pm_write(mlxsw_sp, local_port, pool, dir,
+ 0, max_buff);
+}
+
+int mlxsw_sp_sb_tc_pool_bind_get(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 *p_pool_index, u32 *p_threshold)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port =
+ mlxsw_core_port_driver_priv(mlxsw_core_port);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 local_port = mlxsw_sp_port->local_port;
+ u8 pg_buff = tc_index;
+ enum mlxsw_reg_sbxx_dir dir = pool_type;
+ struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
+ pg_buff, dir);
+
+ *p_threshold = mlxsw_sp_sb_threshold_out(mlxsw_sp, cm->pool, dir,
+ cm->max_buff);
+ *p_pool_index = pool_index_get(cm->pool, pool_type);
+ return 0;
+}
+
+int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u16 pool_index, u32 threshold)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port =
+ mlxsw_core_port_driver_priv(mlxsw_core_port);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 local_port = mlxsw_sp_port->local_port;
+ u8 pg_buff = tc_index;
+ enum mlxsw_reg_sbxx_dir dir = pool_type;
+ u8 pool = pool_get(pool_index);
+ u32 max_buff;
+ int err;
+
+ if (dir != dir_get(pool_index))
+ return -EINVAL;
+
+ err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
+ threshold, &max_buff);
+ if (err)
+ return err;
+
+ return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff, dir,
+ 0, max_buff, pool);
+}
+
+#define MASKED_COUNT_MAX \
+ (MLXSW_REG_SBSR_REC_MAX_COUNT / (MLXSW_SP_SB_TC_COUNT * 2))
+
+struct mlxsw_sp_sb_sr_occ_query_cb_ctx {
+ u8 masked_count;
+ u8 local_port_1;
+};
+
+static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core,
+ char *sbsr_pl, size_t sbsr_pl_len,
+ unsigned long cb_priv)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
+ u8 masked_count;
+ u8 local_port;
+ int rec_index = 0;
+ struct mlxsw_sp_sb_cm *cm;
+ int i;
+
+ memcpy(&cb_ctx, &cb_priv, sizeof(cb_ctx));
+
+ masked_count = 0;
+ for (local_port = cb_ctx.local_port_1;
+ local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
+ if (!mlxsw_sp->ports[local_port])
+ continue;
+ for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
+ cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
+ MLXSW_REG_SBXX_DIR_INGRESS);
+ mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
+ &cm->occ.cur, &cm->occ.max);
+ }
+ if (++masked_count == cb_ctx.masked_count)
+ break;
+ }
+ masked_count = 0;
+ for (local_port = cb_ctx.local_port_1;
+ local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
+ if (!mlxsw_sp->ports[local_port])
+ continue;
+ for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
+ cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port, i,
+ MLXSW_REG_SBXX_DIR_EGRESS);
+ mlxsw_reg_sbsr_rec_unpack(sbsr_pl, rec_index++,
+ &cm->occ.cur, &cm->occ.max);
+ }
+ if (++masked_count == cb_ctx.masked_count)
+ break;
+ }
+}
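The callback walks the records in the same order mlxsw_sp_sb_occ_snapshot() below packs them: one SBSR query covers up to MASKED_COUNT_MAX ports, emitting all ingress per-PG records for the batch first and then the egress per-TC records, which is why the port loop runs twice from cb_ctx.local_port_1 with rec_index carried across both passes.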
+
+int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
+ unsigned long cb_priv;
+ LIST_HEAD(bulk_list);
+ char *sbsr_pl;
+ u8 masked_count;
+ u8 local_port_1;
+ u8 local_port = 0;
+ int i;
+ int err;
+ int err2;
+
+ sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
+ if (!sbsr_pl)
+ return -ENOMEM;
+
+next_batch:
+ local_port++;
+ local_port_1 = local_port;
+ masked_count = 0;
+ mlxsw_reg_sbsr_pack(sbsr_pl, false);
+ for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
+ mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
+ mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
+ }
+ for (; local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
+ if (!mlxsw_sp->ports[local_port])
+ continue;
+ mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
+ mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
+ for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) {
+ err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
+ MLXSW_REG_SBXX_DIR_INGRESS,
+ &bulk_list);
+ if (err)
+ goto out;
+ err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i,
+ MLXSW_REG_SBXX_DIR_EGRESS,
+ &bulk_list);
+ if (err)
+ goto out;
+ }
+ if (++masked_count == MASKED_COUNT_MAX)
+ goto do_query;
+ }
+
+do_query:
+ cb_ctx.masked_count = masked_count;
+ cb_ctx.local_port_1 = local_port_1;
+ memcpy(&cb_priv, &cb_ctx, sizeof(cb_ctx));
+ err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
+ &bulk_list, mlxsw_sp_sb_sr_occ_query_cb,
+ cb_priv);
+ if (err)
+ goto out;
+ if (local_port < MLXSW_PORT_MAX_PORTS)
+ goto next_batch;
+
+out:
+ err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
+ if (!err)
+ err = err2;
+ kfree(sbsr_pl);
+ return err;
+}
+
+int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core,
+ unsigned int sb_index)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ LIST_HEAD(bulk_list);
+ char *sbsr_pl;
+ unsigned int masked_count;
+ u8 local_port = 0;
+ int i;
+ int err;
+ int err2;
+
+ sbsr_pl = kmalloc(MLXSW_REG_SBSR_LEN, GFP_KERNEL);
+ if (!sbsr_pl)
+ return -ENOMEM;
+
+next_batch:
+ local_port++;
+ masked_count = 0;
+ mlxsw_reg_sbsr_pack(sbsr_pl, true);
+ for (i = 0; i < MLXSW_SP_SB_TC_COUNT; i++) {
+ mlxsw_reg_sbsr_pg_buff_mask_set(sbsr_pl, i, 1);
+ mlxsw_reg_sbsr_tclass_mask_set(sbsr_pl, i, 1);
+ }
+ for (; local_port < MLXSW_PORT_MAX_PORTS; local_port++) {
+ if (!mlxsw_sp->ports[local_port])
+ continue;
+ mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, local_port, 1);
+ mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1);
+ for (i = 0; i < MLXSW_SP_SB_POOL_COUNT; i++) {
+ err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
+ MLXSW_REG_SBXX_DIR_INGRESS,
+ &bulk_list);
+ if (err)
+ goto out;
+ err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i,
+ MLXSW_REG_SBXX_DIR_EGRESS,
+ &bulk_list);
+ if (err)
+ goto out;
+ }
+ if (++masked_count == MASKED_COUNT_MAX)
+ goto do_query;
+ }
+
+do_query:
+ err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbsr), sbsr_pl,
+ &bulk_list, NULL, 0);
+ if (err)
+ goto out;
+ if (local_port < MLXSW_PORT_MAX_PORTS)
+ goto next_batch;
+
+out:
+ err2 = mlxsw_reg_trans_bulk_wait(&bulk_list);
+ if (!err)
+ err = err2;
+ kfree(sbsr_pl);
+ return err;
+}
+
+int mlxsw_sp_sb_occ_port_pool_get(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 pool_index,
+ u32 *p_cur, u32 *p_max)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port =
+ mlxsw_core_port_driver_priv(mlxsw_core_port);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 local_port = mlxsw_sp_port->local_port;
+ u8 pool = pool_get(pool_index);
+ enum mlxsw_reg_sbxx_dir dir = dir_get(pool_index);
+ struct mlxsw_sp_sb_pm *pm = mlxsw_sp_sb_pm_get(mlxsw_sp, local_port,
+ pool, dir);
+
+ *p_cur = MLXSW_SP_CELLS_TO_BYTES(pm->occ.cur);
+ *p_max = MLXSW_SP_CELLS_TO_BYTES(pm->occ.max);
+ return 0;
+}
+
+int mlxsw_sp_sb_occ_tc_port_bind_get(struct mlxsw_core_port *mlxsw_core_port,
+ unsigned int sb_index, u16 tc_index,
+ enum devlink_sb_pool_type pool_type,
+ u32 *p_cur, u32 *p_max)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port =
+ mlxsw_core_port_driver_priv(mlxsw_core_port);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 local_port = mlxsw_sp_port->local_port;
+ u8 pg_buff = tc_index;
+ enum mlxsw_reg_sbxx_dir dir = pool_type;
+ struct mlxsw_sp_sb_cm *cm = mlxsw_sp_sb_cm_get(mlxsw_sp, local_port,
+ pg_buff, dir);
+
+ *p_cur = MLXSW_SP_CELLS_TO_BYTES(cm->occ.cur);
+ *p_max = MLXSW_SP_CELLS_TO_BYTES(cm->occ.max);
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
new file mode 100644
index 000000000000..b6ed7f7c531e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
@@ -0,0 +1,486 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/netdevice.h>
+#include <linux/string.h>
+#include <linux/bitops.h>
+#include <net/dcbnl.h>
+
+#include "spectrum.h"
+#include "reg.h"
+
+static u8 mlxsw_sp_dcbnl_getdcbx(struct net_device __always_unused *dev)
+{
+ return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
+}
+
+static u8 mlxsw_sp_dcbnl_setdcbx(struct net_device __always_unused *dev,
+ u8 mode)
+{
+ return (mode != (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE)) ? 1 : 0;
+}
+
+static int mlxsw_sp_dcbnl_ieee_getets(struct net_device *dev,
+ struct ieee_ets *ets)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+ memcpy(ets, mlxsw_sp_port->dcb.ets, sizeof(*ets));
+
+ return 0;
+}
+
+static int mlxsw_sp_port_ets_validate(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct ieee_ets *ets)
+{
+ struct net_device *dev = mlxsw_sp_port->dev;
+ bool has_ets_tc = false;
+ int i, tx_bw_sum = 0;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ switch (ets->tc_tsa[i]) {
+ case IEEE_8021QAZ_TSA_STRICT:
+ break;
+ case IEEE_8021QAZ_TSA_ETS:
+ has_ets_tc = true;
+ tx_bw_sum += ets->tc_tx_bw[i];
+ break;
+ default:
+ netdev_err(dev, "Only strict priority and ETS are supported\n");
+ return -EINVAL;
+ }
+
+ if (ets->prio_tc[i] >= IEEE_8021QAZ_MAX_TCS) {
+ netdev_err(dev, "Invalid TC\n");
+ return -EINVAL;
+ }
+ }
+
+ if (has_ets_tc && tx_bw_sum != 100) {
+ netdev_err(dev, "Total ETS bandwidth should equal 100\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
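For illustration, a struct ieee_ets that passes these checks (hypothetical values, not from the patch) — two ETS classes sharing the link 60/40 and the remaining TCs left as strict priority:

	/* Hypothetical example: tc_tx_bw of the ETS classes sums to 100,
	 * and every prio_tc entry stays below IEEE_8021QAZ_MAX_TCS.
	 */
	struct ieee_ets ets = {
		.tc_tsa   = { IEEE_8021QAZ_TSA_ETS, IEEE_8021QAZ_TSA_ETS,
			      IEEE_8021QAZ_TSA_STRICT, IEEE_8021QAZ_TSA_STRICT,
			      IEEE_8021QAZ_TSA_STRICT, IEEE_8021QAZ_TSA_STRICT,
			      IEEE_8021QAZ_TSA_STRICT, IEEE_8021QAZ_TSA_STRICT },
		.tc_tx_bw = { 60, 40 },
		.prio_tc  = { 0, 0, 1, 1, 2, 2, 3, 3 },
	};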
+
+static int mlxsw_sp_port_pg_prio_map(struct mlxsw_sp_port *mlxsw_sp_port,
+ u8 *prio_tc)
+{
+ char pptb_pl[MLXSW_REG_PPTB_LEN];
+ int i;
+
+ mlxsw_reg_pptb_pack(pptb_pl, mlxsw_sp_port->local_port);
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ mlxsw_reg_pptb_prio_to_buff_pack(pptb_pl, i, prio_tc[i]);
+
+ return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pptb),
+ pptb_pl);
+}
+
+static bool mlxsw_sp_ets_has_pg(u8 *prio_tc, u8 pg)
+{
+ int i;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ if (prio_tc[i] == pg)
+ return true;
+ return false;
+}
+
+static int mlxsw_sp_port_pg_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
+ u8 *old_prio_tc, u8 *new_prio_tc)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char pbmc_pl[MLXSW_REG_PBMC_LEN];
+ int err, i;
+
+ mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
+ if (err)
+ return err;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ u8 pg = old_prio_tc[i];
+
+ if (!mlxsw_sp_ets_has_pg(new_prio_tc, pg))
+ mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, pg, 0);
+ }
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
+}
+
+static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct ieee_ets *ets)
+{
+ bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
+ struct ieee_ets *my_ets = mlxsw_sp_port->dcb.ets;
+ struct net_device *dev = mlxsw_sp_port->dev;
+ int err;
+
+ /* Create the required PGs, but don't destroy existing ones, as
+ * traffic is still directed to them.
+ */
+ err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
+ ets->prio_tc, pause_en,
+ mlxsw_sp_port->dcb.pfc);
+ if (err) {
+ netdev_err(dev, "Failed to configure port's headroom\n");
+ return err;
+ }
+
+ err = mlxsw_sp_port_pg_prio_map(mlxsw_sp_port, ets->prio_tc);
+ if (err) {
+ netdev_err(dev, "Failed to set PG-priority mapping\n");
+ goto err_port_prio_pg_map;
+ }
+
+ err = mlxsw_sp_port_pg_destroy(mlxsw_sp_port, my_ets->prio_tc,
+ ets->prio_tc);
+ if (err)
+ netdev_warn(dev, "Failed to remove ununsed PGs\n");
+
+ return 0;
+
+err_port_prio_pg_map:
+ mlxsw_sp_port_pg_destroy(mlxsw_sp_port, ets->prio_tc, my_ets->prio_tc);
+ return err;
+}
+
+static int __mlxsw_sp_dcbnl_ieee_setets(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct ieee_ets *ets)
+{
+ struct ieee_ets *my_ets = mlxsw_sp_port->dcb.ets;
+ struct net_device *dev = mlxsw_sp_port->dev;
+ int i, err;
+
+ /* Egress configuration. */
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ bool dwrr = ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS;
+ u8 weight = ets->tc_tx_bw[i];
+
+ err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
+ 0, dwrr, weight);
+ if (err) {
+ netdev_err(dev, "Failed to link subgroup ETS element %d to group\n",
+ i);
+ goto err_port_ets_set;
+ }
+ }
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
+ ets->prio_tc[i]);
+ if (err) {
+ netdev_err(dev, "Failed to map prio %d to TC %d\n", i,
+ ets->prio_tc[i]);
+ goto err_port_prio_tc_set;
+ }
+ }
+
+ /* Ingress configuration. */
+ err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, ets);
+ if (err)
+ goto err_port_headroom_set;
+
+ return 0;
+
+err_port_headroom_set:
+ i = IEEE_8021QAZ_MAX_TCS;
+err_port_prio_tc_set:
+ for (i--; i >= 0; i--)
+ mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, my_ets->prio_tc[i]);
+ i = IEEE_8021QAZ_MAX_TCS;
+err_port_ets_set:
+ for (i--; i >= 0; i--) {
+ bool dwrr = my_ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS;
+ u8 weight = my_ets->tc_tx_bw[i];
+
+ err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
+ 0, dwrr, weight);
+ }
+ return err;
+}
+
+static int mlxsw_sp_dcbnl_ieee_setets(struct net_device *dev,
+ struct ieee_ets *ets)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err;
+
+ err = mlxsw_sp_port_ets_validate(mlxsw_sp_port, ets);
+ if (err)
+ return err;
+
+ err = __mlxsw_sp_dcbnl_ieee_setets(mlxsw_sp_port, ets);
+ if (err)
+ return err;
+
+ memcpy(mlxsw_sp_port->dcb.ets, ets, sizeof(*ets));
+ mlxsw_sp_port->dcb.ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
+
+ return 0;
+}
+
+static int mlxsw_sp_dcbnl_ieee_getmaxrate(struct net_device *dev,
+ struct ieee_maxrate *maxrate)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+
+ memcpy(maxrate, mlxsw_sp_port->dcb.maxrate, sizeof(*maxrate));
+
+ return 0;
+}
+
+static int mlxsw_sp_dcbnl_ieee_setmaxrate(struct net_device *dev,
+ struct ieee_maxrate *maxrate)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct ieee_maxrate *my_maxrate = mlxsw_sp_port->dcb.maxrate;
+ int err, i;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
+ i, 0,
+ maxrate->tc_maxrate[i]);
+ if (err) {
+ netdev_err(dev, "Failed to set maxrate for TC %d\n", i);
+ goto err_port_ets_maxrate_set;
+ }
+ }
+
+ memcpy(mlxsw_sp_port->dcb.maxrate, maxrate, sizeof(*maxrate));
+
+ return 0;
+
+err_port_ets_maxrate_set:
+ for (i--; i >= 0; i--)
+ mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
+ MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
+ i, 0, my_maxrate->tc_maxrate[i]);
+ return err;
+}
+
+static int mlxsw_sp_port_pfc_cnt_get(struct mlxsw_sp_port *mlxsw_sp_port,
+ u8 prio)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct ieee_pfc *my_pfc = mlxsw_sp_port->dcb.pfc;
+ char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
+ int err;
+
+ mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port,
+ MLXSW_REG_PPCNT_PRIO_CNT, prio);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
+ if (err)
+ return err;
+
+ my_pfc->requests[prio] = mlxsw_reg_ppcnt_tx_pause_get(ppcnt_pl);
+ my_pfc->indications[prio] = mlxsw_reg_ppcnt_rx_pause_get(ppcnt_pl);
+
+ return 0;
+}
+
+static int mlxsw_sp_dcbnl_ieee_getpfc(struct net_device *dev,
+ struct ieee_pfc *pfc)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ int err, i;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ err = mlxsw_sp_port_pfc_cnt_get(mlxsw_sp_port, i);
+ if (err) {
+ netdev_err(dev, "Failed to get PFC count for priority %d\n",
+ i);
+ return err;
+ }
+ }
+
+ memcpy(pfc, mlxsw_sp_port->dcb.pfc, sizeof(*pfc));
+
+ return 0;
+}
+
+static int mlxsw_sp_port_pfc_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct ieee_pfc *pfc)
+{
+ char pfcc_pl[MLXSW_REG_PFCC_LEN];
+
+ mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
+ mlxsw_reg_pfcc_pprx_set(pfcc_pl, mlxsw_sp_port->link.rx_pause);
+ mlxsw_reg_pfcc_pptx_set(pfcc_pl, mlxsw_sp_port->link.tx_pause);
+ mlxsw_reg_pfcc_prio_pack(pfcc_pl, pfc->pfc_en);
+
+ return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
+ pfcc_pl);
+}
+
+static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
+ struct ieee_pfc *pfc)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
+ int err;
+
+ if (pause_en && pfc->pfc_en) {
+ netdev_err(dev, "PAUSE frames already enabled on port\n");
+ return -EINVAL;
+ }
+
+ err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
+ mlxsw_sp_port->dcb.ets->prio_tc,
+ pause_en, pfc);
+ if (err) {
+ netdev_err(dev, "Failed to configure port's headroom for PFC\n");
+ return err;
+ }
+
+ err = mlxsw_sp_port_pfc_set(mlxsw_sp_port, pfc);
+ if (err) {
+ netdev_err(dev, "Failed to configure PFC\n");
+ goto err_port_pfc_set;
+ }
+
+ memcpy(mlxsw_sp_port->dcb.pfc, pfc, sizeof(*pfc));
+ mlxsw_sp_port->dcb.pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
+
+ return 0;
+
+err_port_pfc_set:
+ __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
+ mlxsw_sp_port->dcb.ets->prio_tc, pause_en,
+ mlxsw_sp_port->dcb.pfc);
+ return err;
+}
+
+static const struct dcbnl_rtnl_ops mlxsw_sp_dcbnl_ops = {
+ .ieee_getets = mlxsw_sp_dcbnl_ieee_getets,
+ .ieee_setets = mlxsw_sp_dcbnl_ieee_setets,
+ .ieee_getmaxrate = mlxsw_sp_dcbnl_ieee_getmaxrate,
+ .ieee_setmaxrate = mlxsw_sp_dcbnl_ieee_setmaxrate,
+ .ieee_getpfc = mlxsw_sp_dcbnl_ieee_getpfc,
+ .ieee_setpfc = mlxsw_sp_dcbnl_ieee_setpfc,
+
+ .getdcbx = mlxsw_sp_dcbnl_getdcbx,
+ .setdcbx = mlxsw_sp_dcbnl_setdcbx,
+};
+
+static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_port->dcb.ets = kzalloc(sizeof(*mlxsw_sp_port->dcb.ets),
+ GFP_KERNEL);
+ if (!mlxsw_sp_port->dcb.ets)
+ return -ENOMEM;
+
+ mlxsw_sp_port->dcb.ets->ets_cap = IEEE_8021QAZ_MAX_TCS;
+
+ return 0;
+}
+
+static void mlxsw_sp_port_ets_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ kfree(mlxsw_sp_port->dcb.ets);
+}
+
+static int mlxsw_sp_port_maxrate_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ int i;
+
+ mlxsw_sp_port->dcb.maxrate = kmalloc(sizeof(*mlxsw_sp_port->dcb.maxrate),
+ GFP_KERNEL);
+ if (!mlxsw_sp_port->dcb.maxrate)
+ return -ENOMEM;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ mlxsw_sp_port->dcb.maxrate->tc_maxrate[i] = MLXSW_REG_QEEC_MAS_DIS;
+
+ return 0;
+}
+
+static void mlxsw_sp_port_maxrate_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ kfree(mlxsw_sp_port->dcb.maxrate);
+}
+
+static int mlxsw_sp_port_pfc_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_port->dcb.pfc = kzalloc(sizeof(*mlxsw_sp_port->dcb.pfc),
+ GFP_KERNEL);
+ if (!mlxsw_sp_port->dcb.pfc)
+ return -ENOMEM;
+
+ mlxsw_sp_port->dcb.pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;
+
+ return 0;
+}
+
+static void mlxsw_sp_port_pfc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ kfree(mlxsw_sp_port->dcb.pfc);
+}
+
+int mlxsw_sp_port_dcb_init(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ int err;
+
+ err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
+ if (err)
+ return err;
+ err = mlxsw_sp_port_maxrate_init(mlxsw_sp_port);
+ if (err)
+ goto err_port_maxrate_init;
+ err = mlxsw_sp_port_pfc_init(mlxsw_sp_port);
+ if (err)
+ goto err_port_pfc_init;
+
+ mlxsw_sp_port->dev->dcbnl_ops = &mlxsw_sp_dcbnl_ops;
+
+ return 0;
+
+err_port_pfc_init:
+ mlxsw_sp_port_maxrate_fini(mlxsw_sp_port);
+err_port_maxrate_init:
+ mlxsw_sp_port_ets_fini(mlxsw_sp_port);
+ return err;
+}
+
+void mlxsw_sp_port_dcb_fini(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_port_pfc_fini(mlxsw_sp_port);
+ mlxsw_sp_port_maxrate_fini(mlxsw_sp_port);
+ mlxsw_sp_port_ets_fini(mlxsw_sp_port);
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
new file mode 100644
index 000000000000..ac321e8e5c1a
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
@@ -0,0 +1,91 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+
+#include "spectrum.h"
+
+#define MLXSW_SP_KVDL_SINGLE_BASE 0
+#define MLXSW_SP_KVDL_SINGLE_SIZE 16384
+#define MLXSW_SP_KVDL_CHUNKS_BASE \
+ (MLXSW_SP_KVDL_SINGLE_BASE + MLXSW_SP_KVDL_SINGLE_SIZE)
+#define MLXSW_SP_KVDL_CHUNKS_SIZE \
+ (MLXSW_SP_KVD_LINEAR_SIZE - MLXSW_SP_KVDL_CHUNKS_BASE)
+#define MLXSW_SP_CHUNK_MAX 32
+
+int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count)
+{
+ int entry_index;
+ int size;
+ int type_base;
+ int type_size;
+ int type_entries;
+
+ if (entry_count == 0 || entry_count > MLXSW_SP_CHUNK_MAX) {
+ return -EINVAL;
+ } else if (entry_count == 1) {
+ type_base = MLXSW_SP_KVDL_SINGLE_BASE;
+ type_size = MLXSW_SP_KVDL_SINGLE_SIZE;
+ type_entries = 1;
+ } else {
+ type_base = MLXSW_SP_KVDL_CHUNKS_BASE;
+ type_size = MLXSW_SP_KVDL_CHUNKS_SIZE;
+ type_entries = MLXSW_SP_CHUNK_MAX;
+ }
+
+ entry_index = type_base;
+ size = type_base + type_size;
+ for_each_clear_bit_from(entry_index, mlxsw_sp->kvdl.usage, size) {
+ int i;
+
+ for (i = 0; i < type_entries; i++)
+ set_bit(entry_index + i, mlxsw_sp->kvdl.usage);
+ return entry_index;
+ }
+ return -ENOBUFS;
+}
+
+void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index)
+{
+ int type_entries;
+ int i;
+
+ if (entry_index < MLXSW_SP_KVDL_CHUNKS_BASE)
+ type_entries = 1;
+ else
+ type_entries = MLXSW_SP_CHUNK_MAX;
+ for (i = 0; i < type_entries; i++)
+ clear_bit(entry_index + i, mlxsw_sp->kvdl.usage);
+}
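
mlxsw_sp_kvdl_alloc() splits the KVD linear space into a region of single entries and a region of 32-entry chunks, and does a first-fit scan over a usage bitmap; mlxsw_sp_kvdl_free() infers the allocation size from which region the index falls in. A scaled-down userspace sketch of the same scheme (sizes and names here are illustrative; the driver scans individual clear bits, while the sketch steps in fixed strides):

#include <stdio.h>
#include <string.h>

#define SINGLE_BASE 0
#define SINGLE_SIZE 64      /* scaled down from the driver's 16384 */
#define CHUNKS_BASE (SINGLE_BASE + SINGLE_SIZE)
#define TOTAL_SIZE  256     /* scaled down from the full linear area */
#define CHUNK_MAX   32

static unsigned char usage[TOTAL_SIZE]; /* one byte per entry, 0 = free */

/* First-fit allocation: singles from one region, chunks from the other. */
static int kvdl_alloc(unsigned int count)
{
    int base, size, entries, i, j;

    if (count == 0 || count > CHUNK_MAX)
        return -1;
    if (count == 1) {
        base = SINGLE_BASE; size = SINGLE_SIZE; entries = 1;
    } else {
        base = CHUNKS_BASE; size = TOTAL_SIZE - CHUNKS_BASE;
        entries = CHUNK_MAX; /* chunks are always CHUNK_MAX wide */
    }
    for (i = base; i < base + size; i += entries) {
        if (usage[i])
            continue;
        for (j = 0; j < entries; j++)
            usage[i + j] = 1;
        return i;
    }
    return -1; /* out of space */
}

static void kvdl_free(int index)
{
    int entries = index < CHUNKS_BASE ? 1 : CHUNK_MAX;

    memset(&usage[index], 0, entries);
}

int main(void)
{
    int a = kvdl_alloc(1), b = kvdl_alloc(5), c = kvdl_alloc(5);

    printf("a=%d b=%d c=%d\n", a, b, c);      /* a=0 b=64 c=96 */
    kvdl_free(b);
    printf("re-alloc: %d\n", kvdl_alloc(2));  /* 64, first-fit reuse */
    return 0;
}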
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
new file mode 100644
index 000000000000..3f5c51da6d3e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -0,0 +1,1863 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+ * Copyright (c) 2016 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
+ * Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
+ * Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/rhashtable.h>
+#include <linux/bitops.h>
+#include <linux/in6.h>
+#include <linux/notifier.h>
+#include <net/netevent.h>
+#include <net/neighbour.h>
+#include <net/arp.h>
+
+#include "spectrum.h"
+#include "core.h"
+#include "reg.h"
+
+#define mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) \
+ for_each_set_bit(prefix, (prefix_usage)->b, MLXSW_SP_PREFIX_COUNT)
+
+static bool
+mlxsw_sp_prefix_usage_subset(struct mlxsw_sp_prefix_usage *prefix_usage1,
+ struct mlxsw_sp_prefix_usage *prefix_usage2)
+{
+ unsigned char prefix;
+
+ mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage1) {
+ if (!test_bit(prefix, prefix_usage2->b))
+ return false;
+ }
+ return true;
+}
+
+static bool
+mlxsw_sp_prefix_usage_eq(struct mlxsw_sp_prefix_usage *prefix_usage1,
+ struct mlxsw_sp_prefix_usage *prefix_usage2)
+{
+ return !memcmp(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
+}
+
+static bool
+mlxsw_sp_prefix_usage_none(struct mlxsw_sp_prefix_usage *prefix_usage)
+{
+ struct mlxsw_sp_prefix_usage prefix_usage_none = {{ 0 }};
+
+ return mlxsw_sp_prefix_usage_eq(prefix_usage, &prefix_usage_none);
+}
+
+static void
+mlxsw_sp_prefix_usage_cpy(struct mlxsw_sp_prefix_usage *prefix_usage1,
+ struct mlxsw_sp_prefix_usage *prefix_usage2)
+{
+ memcpy(prefix_usage1, prefix_usage2, sizeof(*prefix_usage1));
+}
+
+static void
+mlxsw_sp_prefix_usage_zero(struct mlxsw_sp_prefix_usage *prefix_usage)
+{
+ memset(prefix_usage, 0, sizeof(*prefix_usage));
+}
+
+static void
+mlxsw_sp_prefix_usage_set(struct mlxsw_sp_prefix_usage *prefix_usage,
+ unsigned char prefix_len)
+{
+ set_bit(prefix_len, prefix_usage->b);
+}
+
+static void
+mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
+ unsigned char prefix_len)
+{
+ clear_bit(prefix_len, prefix_usage->b);
+}
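
These helpers treat an LPM tree's prefix usage as a bitmap indexed by prefix length, so subset and equality tests reduce to bit operations. A minimal sketch using a plain 64-bit integer as the bitmap (the driver wraps its bitmap in struct mlxsw_sp_prefix_usage):

#include <stdio.h>

typedef unsigned long long prefix_usage; /* bit N set => /N is used */

static int usage_subset(prefix_usage a, prefix_usage b)
{
    return (a & ~b) == 0;   /* every prefix in a is also in b */
}

static int usage_eq(prefix_usage a, prefix_usage b) { return a == b; }

static void usage_set(prefix_usage *u, int len)   { *u |=  (1ULL << len); }
static void usage_clear(prefix_usage *u, int len) { *u &= ~(1ULL << len); }

int main(void)
{
    prefix_usage tree = 0, req = 0;

    usage_set(&tree, 24);
    usage_set(&tree, 32);
    usage_set(&req, 24);
    printf("subset: %d eq: %d\n", usage_subset(req, tree),
           usage_eq(req, tree));         /* subset: 1 eq: 0 */
    usage_clear(&tree, 32);
    printf("eq: %d\n", usage_eq(req, tree)); /* eq: 1 */
    return 0;
}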
+
+struct mlxsw_sp_fib_key {
+ struct net_device *dev;
+ unsigned char addr[sizeof(struct in6_addr)];
+ unsigned char prefix_len;
+};
+
+enum mlxsw_sp_fib_entry_type {
+ MLXSW_SP_FIB_ENTRY_TYPE_REMOTE,
+ MLXSW_SP_FIB_ENTRY_TYPE_LOCAL,
+ MLXSW_SP_FIB_ENTRY_TYPE_TRAP,
+};
+
+struct mlxsw_sp_nexthop_group;
+
+struct mlxsw_sp_fib_entry {
+ struct rhash_head ht_node;
+ struct mlxsw_sp_fib_key key;
+ enum mlxsw_sp_fib_entry_type type;
+ unsigned int ref_count;
+ u16 rif; /* used for action local */
+ struct mlxsw_sp_vr *vr;
+ struct list_head nexthop_group_node;
+ struct mlxsw_sp_nexthop_group *nh_group;
+};
+
+struct mlxsw_sp_fib {
+ struct rhashtable ht;
+ unsigned long prefix_ref_count[MLXSW_SP_PREFIX_COUNT];
+ struct mlxsw_sp_prefix_usage prefix_usage;
+};
+
+static const struct rhashtable_params mlxsw_sp_fib_ht_params = {
+ .key_offset = offsetof(struct mlxsw_sp_fib_entry, key),
+ .head_offset = offsetof(struct mlxsw_sp_fib_entry, ht_node),
+ .key_len = sizeof(struct mlxsw_sp_fib_key),
+ .automatic_shrinking = true,
+};
+
+static int mlxsw_sp_fib_entry_insert(struct mlxsw_sp_fib *fib,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ unsigned char prefix_len = fib_entry->key.prefix_len;
+ int err;
+
+ err = rhashtable_insert_fast(&fib->ht, &fib_entry->ht_node,
+ mlxsw_sp_fib_ht_params);
+ if (err)
+ return err;
+ if (fib->prefix_ref_count[prefix_len]++ == 0)
+ mlxsw_sp_prefix_usage_set(&fib->prefix_usage, prefix_len);
+ return 0;
+}
+
+static void mlxsw_sp_fib_entry_remove(struct mlxsw_sp_fib *fib,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ unsigned char prefix_len = fib_entry->key.prefix_len;
+
+ if (--fib->prefix_ref_count[prefix_len] == 0)
+ mlxsw_sp_prefix_usage_clear(&fib->prefix_usage, prefix_len);
+ rhashtable_remove_fast(&fib->ht, &fib_entry->ht_node,
+ mlxsw_sp_fib_ht_params);
+}
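
Insertion and removal keep a per-prefix-length reference count and fold it into the usage bitmap only on the 0 -> 1 and 1 -> 0 transitions, so the bitmap always reflects which prefix lengths have at least one route. A compact sketch of that pattern:

#include <stdio.h>

#define PREFIX_COUNT 33 /* IPv4 prefix lengths 0..32 */

static unsigned long ref_count[PREFIX_COUNT];
static unsigned long long usage; /* bit N set => some /N route exists */

static void route_insert(int prefix_len)
{
    if (ref_count[prefix_len]++ == 0)
        usage |= 1ULL << prefix_len;
}

static void route_remove(int prefix_len)
{
    if (--ref_count[prefix_len] == 0)
        usage &= ~(1ULL << prefix_len);
}

int main(void)
{
    route_insert(24);
    route_insert(24);
    route_remove(24);
    printf("/24 used: %d\n", !!(usage & (1ULL << 24))); /* 1 */
    route_remove(24);
    printf("/24 used: %d\n", !!(usage & (1ULL << 24))); /* 0 */
    return 0;
}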
+
+static struct mlxsw_sp_fib_entry *
+mlxsw_sp_fib_entry_create(struct mlxsw_sp_fib *fib, const void *addr,
+ size_t addr_len, unsigned char prefix_len,
+ struct net_device *dev)
+{
+ struct mlxsw_sp_fib_entry *fib_entry;
+
+ fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL);
+ if (!fib_entry)
+ return NULL;
+ fib_entry->key.dev = dev;
+ memcpy(fib_entry->key.addr, addr, addr_len);
+ fib_entry->key.prefix_len = prefix_len;
+ return fib_entry;
+}
+
+static void mlxsw_sp_fib_entry_destroy(struct mlxsw_sp_fib_entry *fib_entry)
+{
+ kfree(fib_entry);
+}
+
+static struct mlxsw_sp_fib_entry *
+mlxsw_sp_fib_entry_lookup(struct mlxsw_sp_fib *fib, const void *addr,
+ size_t addr_len, unsigned char prefix_len,
+ struct net_device *dev)
+{
+ struct mlxsw_sp_fib_key key;
+
+ memset(&key, 0, sizeof(key));
+ key.dev = dev;
+ memcpy(key.addr, addr, addr_len);
+ key.prefix_len = prefix_len;
+ return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
+}
+
+static struct mlxsw_sp_fib *mlxsw_sp_fib_create(void)
+{
+ struct mlxsw_sp_fib *fib;
+ int err;
+
+ fib = kzalloc(sizeof(*fib), GFP_KERNEL);
+ if (!fib)
+ return ERR_PTR(-ENOMEM);
+ err = rhashtable_init(&fib->ht, &mlxsw_sp_fib_ht_params);
+ if (err)
+ goto err_rhashtable_init;
+ return fib;
+
+err_rhashtable_init:
+ kfree(fib);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_fib_destroy(struct mlxsw_sp_fib *fib)
+{
+ rhashtable_destroy(&fib->ht);
+ kfree(fib);
+}
+
+static struct mlxsw_sp_lpm_tree *
+mlxsw_sp_lpm_tree_find_unused(struct mlxsw_sp *mlxsw_sp, bool one_reserved)
+{
+ struct mlxsw_sp_lpm_tree *lpm_tree;
+ int i;
+
+ for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
+ lpm_tree = &mlxsw_sp->router.lpm_trees[i];
+ if (lpm_tree->ref_count == 0) {
+ if (one_reserved)
+ one_reserved = false;
+ else
+ return lpm_tree;
+ }
+ }
+ return NULL;
+}
+
+static int mlxsw_sp_lpm_tree_alloc(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_lpm_tree *lpm_tree)
+{
+ char ralta_pl[MLXSW_REG_RALTA_LEN];
+
+ mlxsw_reg_ralta_pack(ralta_pl, true, lpm_tree->proto, lpm_tree->id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
+}
+
+static int mlxsw_sp_lpm_tree_free(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_lpm_tree *lpm_tree)
+{
+ char ralta_pl[MLXSW_REG_RALTA_LEN];
+
+ mlxsw_reg_ralta_pack(ralta_pl, false, lpm_tree->proto, lpm_tree->id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralta), ralta_pl);
+}
+
+static int
+mlxsw_sp_lpm_tree_left_struct_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_prefix_usage *prefix_usage,
+ struct mlxsw_sp_lpm_tree *lpm_tree)
+{
+ char ralst_pl[MLXSW_REG_RALST_LEN];
+ u8 root_bin = 0;
+ u8 prefix;
+ u8 last_prefix = MLXSW_REG_RALST_BIN_NO_CHILD;
+
+ mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage)
+ root_bin = prefix;
+
+ mlxsw_reg_ralst_pack(ralst_pl, root_bin, lpm_tree->id);
+ mlxsw_sp_prefix_usage_for_each(prefix, prefix_usage) {
+ if (prefix == 0)
+ continue;
+ mlxsw_reg_ralst_bin_pack(ralst_pl, prefix, last_prefix,
+ MLXSW_REG_RALST_BIN_NO_CHILD);
+ last_prefix = prefix;
+ }
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralst), ralst_pl);
+}
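
The tree structure written here is a simple chain: prefixes are visited in ascending order, each used prefix length becomes a bin whose left child is the previous (shorter) used prefix, and the longest used prefix ends up as the root, so lookups can fall back from longer to shorter matches. A small sketch of the same chain construction (values illustrative):

#include <stdio.h>

#define NO_CHILD     0xff /* stands in for MLXSW_REG_RALST_BIN_NO_CHILD */
#define PREFIX_COUNT 33   /* IPv4 prefix lengths 0..32 */

int main(void)
{
    int used[PREFIX_COUNT] = { 0 };
    int left[PREFIX_COUNT];
    int p, last = NO_CHILD, root = 0;

    used[16] = used[24] = used[32] = 1;

    /* Ascending walk: each used bin's left child is the previous
     * (shorter) used prefix; the root ends up as the longest one.
     */
    for (p = 0; p < PREFIX_COUNT; p++) {
        if (!used[p])
            continue;
        root = p;
        left[p] = last;
        last = p;
    }
    printf("root bin: /%d\n", root); /* /32 */
    for (p = PREFIX_COUNT - 1; p >= 0; p--) {
        if (!used[p])
            continue;
        if (left[p] == NO_CHILD)
            printf("/%d -> no left child\n", p);
        else
            printf("/%d -> left child /%d\n", p, left[p]);
    }
    return 0;
}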
+
+static struct mlxsw_sp_lpm_tree *
+mlxsw_sp_lpm_tree_create(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_prefix_usage *prefix_usage,
+ enum mlxsw_sp_l3proto proto, bool one_reserved)
+{
+ struct mlxsw_sp_lpm_tree *lpm_tree;
+ int err;
+
+ lpm_tree = mlxsw_sp_lpm_tree_find_unused(mlxsw_sp, one_reserved);
+ if (!lpm_tree)
+ return ERR_PTR(-EBUSY);
+ lpm_tree->proto = proto;
+ err = mlxsw_sp_lpm_tree_alloc(mlxsw_sp, lpm_tree);
+ if (err)
+ return ERR_PTR(err);
+
+ err = mlxsw_sp_lpm_tree_left_struct_set(mlxsw_sp, prefix_usage,
+ lpm_tree);
+ if (err)
+ goto err_left_struct_set;
+ return lpm_tree;
+
+err_left_struct_set:
+ mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
+ return ERR_PTR(err);
+}
+
+static int mlxsw_sp_lpm_tree_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_lpm_tree *lpm_tree)
+{
+ return mlxsw_sp_lpm_tree_free(mlxsw_sp, lpm_tree);
+}
+
+static struct mlxsw_sp_lpm_tree *
+mlxsw_sp_lpm_tree_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_prefix_usage *prefix_usage,
+ enum mlxsw_sp_l3proto proto, bool one_reserved)
+{
+ struct mlxsw_sp_lpm_tree *lpm_tree;
+ int i;
+
+ for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
+ lpm_tree = &mlxsw_sp->router.lpm_trees[i];
+ if (lpm_tree->proto == proto &&
+ mlxsw_sp_prefix_usage_eq(&lpm_tree->prefix_usage,
+ prefix_usage))
+ goto inc_ref_count;
+ }
+ lpm_tree = mlxsw_sp_lpm_tree_create(mlxsw_sp, prefix_usage,
+ proto, one_reserved);
+ if (IS_ERR(lpm_tree))
+ return lpm_tree;
+
+inc_ref_count:
+ lpm_tree->ref_count++;
+ return lpm_tree;
+}
+
+static int mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_lpm_tree *lpm_tree)
+{
+ if (--lpm_tree->ref_count == 0)
+ return mlxsw_sp_lpm_tree_destroy(mlxsw_sp, lpm_tree);
+ return 0;
+}
+
+static void mlxsw_sp_lpm_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_lpm_tree *lpm_tree;
+ int i;
+
+ for (i = 0; i < MLXSW_SP_LPM_TREE_COUNT; i++) {
+ lpm_tree = &mlxsw_sp->router.lpm_trees[i];
+ lpm_tree->id = i + MLXSW_SP_LPM_TREE_MIN;
+ }
+}
+
+static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_vr *vr;
+ int i;
+
+ for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
+ vr = &mlxsw_sp->router.vrs[i];
+ if (!vr->used)
+ return vr;
+ }
+ return NULL;
+}
+
+static int mlxsw_sp_vr_lpm_tree_bind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_vr *vr)
+{
+ char raltb_pl[MLXSW_REG_RALTB_LEN];
+
+ mlxsw_reg_raltb_pack(raltb_pl, vr->id, vr->proto, vr->lpm_tree->id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
+}
+
+static int mlxsw_sp_vr_lpm_tree_unbind(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_vr *vr)
+{
+ char raltb_pl[MLXSW_REG_RALTB_LEN];
+
+ /* Bind to tree 0, which is the default */
+ mlxsw_reg_raltb_pack(raltb_pl, vr->id, vr->proto, 0);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raltb), raltb_pl);
+}
+
+static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
+{
+ /* For our purposes, squash the main and local tables into one */
+ if (tb_id == RT_TABLE_LOCAL)
+ tb_id = RT_TABLE_MAIN;
+ return tb_id;
+}
+
+static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
+ u32 tb_id,
+ enum mlxsw_sp_l3proto proto)
+{
+ struct mlxsw_sp_vr *vr;
+ int i;
+
+ tb_id = mlxsw_sp_fix_tb_id(tb_id);
+ for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
+ vr = &mlxsw_sp->router.vrs[i];
+ if (vr->used && vr->proto == proto && vr->tb_id == tb_id)
+ return vr;
+ }
+ return NULL;
+}
+
+static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
+ unsigned char prefix_len,
+ u32 tb_id,
+ enum mlxsw_sp_l3proto proto)
+{
+ struct mlxsw_sp_prefix_usage req_prefix_usage;
+ struct mlxsw_sp_lpm_tree *lpm_tree;
+ struct mlxsw_sp_vr *vr;
+ int err;
+
+ vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
+ if (!vr)
+ return ERR_PTR(-EBUSY);
+ vr->fib = mlxsw_sp_fib_create();
+ if (IS_ERR(vr->fib))
+ return ERR_CAST(vr->fib);
+
+ vr->proto = proto;
+ vr->tb_id = tb_id;
+ mlxsw_sp_prefix_usage_zero(&req_prefix_usage);
+ mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
+ lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage,
+ proto, true);
+ if (IS_ERR(lpm_tree)) {
+ err = PTR_ERR(lpm_tree);
+ goto err_tree_get;
+ }
+ vr->lpm_tree = lpm_tree;
+ err = mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
+ if (err)
+ goto err_tree_bind;
+
+ vr->used = true;
+ return vr;
+
+err_tree_bind:
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
+err_tree_get:
+ mlxsw_sp_fib_destroy(vr->fib);
+
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_vr_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_vr *vr)
+{
+ mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr);
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
+ mlxsw_sp_fib_destroy(vr->fib);
+ vr->used = false;
+}
+
+static int
+mlxsw_sp_vr_lpm_tree_check(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
+ struct mlxsw_sp_prefix_usage *req_prefix_usage)
+{
+ struct mlxsw_sp_lpm_tree *lpm_tree;
+
+ if (mlxsw_sp_prefix_usage_eq(req_prefix_usage,
+ &vr->lpm_tree->prefix_usage))
+ return 0;
+
+ lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, req_prefix_usage,
+ vr->proto, false);
+ if (IS_ERR(lpm_tree)) {
+ /* We failed to get a tree according to the required
+ * prefix usage. However, the current tree might still be
+ * good for us if our requirement is a subset of the
+ * prefixes used in the tree.
+ */
+ if (mlxsw_sp_prefix_usage_subset(req_prefix_usage,
+ &vr->lpm_tree->prefix_usage))
+ return 0;
+ return PTR_ERR(lpm_tree);
+ }
+
+ mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, vr);
+ mlxsw_sp_lpm_tree_put(mlxsw_sp, vr->lpm_tree);
+ vr->lpm_tree = lpm_tree;
+ return mlxsw_sp_vr_lpm_tree_bind(mlxsw_sp, vr);
+}
+
+static struct mlxsw_sp_vr *mlxsw_sp_vr_get(struct mlxsw_sp *mlxsw_sp,
+ unsigned char prefix_len,
+ u32 tb_id,
+ enum mlxsw_sp_l3proto proto)
+{
+ struct mlxsw_sp_vr *vr;
+ int err;
+
+ tb_id = mlxsw_sp_fix_tb_id(tb_id);
+ vr = mlxsw_sp_vr_find(mlxsw_sp, tb_id, proto);
+ if (!vr) {
+ vr = mlxsw_sp_vr_create(mlxsw_sp, prefix_len, tb_id, proto);
+ if (IS_ERR(vr))
+ return vr;
+ } else {
+ struct mlxsw_sp_prefix_usage req_prefix_usage;
+
+ mlxsw_sp_prefix_usage_cpy(&req_prefix_usage,
+ &vr->fib->prefix_usage);
+ mlxsw_sp_prefix_usage_set(&req_prefix_usage, prefix_len);
+ /* Need to replace LPM tree in case new prefix is required. */
+ err = mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
+ &req_prefix_usage);
+ if (err)
+ return ERR_PTR(err);
+ }
+ return vr;
+}
+
+static void mlxsw_sp_vr_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr)
+{
+ /* Destroy the virtual router entity in case the associated FIB is
+ * empty, allowing it to be reused for other tables in the future.
+ * Otherwise, check whether some prefix usage disappeared and replace
+ * the tree if so. Note that if a new, smaller tree cannot be
+ * allocated, the original one is kept in use.
+ */
+ if (mlxsw_sp_prefix_usage_none(&vr->fib->prefix_usage))
+ mlxsw_sp_vr_destroy(mlxsw_sp, vr);
+ else
+ mlxsw_sp_vr_lpm_tree_check(mlxsw_sp, vr,
+ &vr->fib->prefix_usage);
+}
+
+static void mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_vr *vr;
+ int i;
+
+ for (i = 0; i < MLXSW_SP_VIRTUAL_ROUTER_MAX; i++) {
+ vr = &mlxsw_sp->router.vrs[i];
+ vr->id = i;
+ }
+}
+
+struct mlxsw_sp_neigh_key {
+ unsigned char addr[sizeof(struct in6_addr)];
+ struct net_device *dev;
+};
+
+struct mlxsw_sp_neigh_entry {
+ struct rhash_head ht_node;
+ struct mlxsw_sp_neigh_key key;
+ u16 rif;
+ struct neighbour *n;
+ bool offloaded;
+ struct delayed_work dw;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ unsigned char ha[ETH_ALEN];
+ struct list_head nexthop_list; /* list of nexthops using
+ * this neigh entry
+ */
+ struct list_head nexthop_neighs_list_node;
+};
+
+static const struct rhashtable_params mlxsw_sp_neigh_ht_params = {
+ .key_offset = offsetof(struct mlxsw_sp_neigh_entry, key),
+ .head_offset = offsetof(struct mlxsw_sp_neigh_entry, ht_node),
+ .key_len = sizeof(struct mlxsw_sp_neigh_key),
+};
+
+static int
+mlxsw_sp_neigh_entry_insert(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_neigh_entry *neigh_entry)
+{
+ return rhashtable_insert_fast(&mlxsw_sp->router.neigh_ht,
+ &neigh_entry->ht_node,
+ mlxsw_sp_neigh_ht_params);
+}
+
+static void
+mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_neigh_entry *neigh_entry)
+{
+ rhashtable_remove_fast(&mlxsw_sp->router.neigh_ht,
+ &neigh_entry->ht_node,
+ mlxsw_sp_neigh_ht_params);
+}
+
+static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work);
+
+static struct mlxsw_sp_neigh_entry *
+mlxsw_sp_neigh_entry_create(const void *addr, size_t addr_len,
+ struct net_device *dev, u16 rif,
+ struct neighbour *n)
+{
+ struct mlxsw_sp_neigh_entry *neigh_entry;
+
+ neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_ATOMIC);
+ if (!neigh_entry)
+ return NULL;
+ memcpy(neigh_entry->key.addr, addr, addr_len);
+ neigh_entry->key.dev = dev;
+ neigh_entry->rif = rif;
+ neigh_entry->n = n;
+ INIT_DELAYED_WORK(&neigh_entry->dw, mlxsw_sp_router_neigh_update_hw);
+ INIT_LIST_HEAD(&neigh_entry->nexthop_list);
+ return neigh_entry;
+}
+
+static void
+mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp_neigh_entry *neigh_entry)
+{
+ kfree(neigh_entry);
+}
+
+static struct mlxsw_sp_neigh_entry *
+mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, const void *addr,
+ size_t addr_len, struct net_device *dev)
+{
+ struct mlxsw_sp_neigh_key key = {{ 0 }};
+
+ memcpy(key.addr, addr, addr_len);
+ key.dev = dev;
+ return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht,
+ &key, mlxsw_sp_neigh_ht_params);
+}
+
+int mlxsw_sp_router_neigh_construct(struct net_device *dev,
+ struct neighbour *n)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_neigh_entry *neigh_entry;
+ struct mlxsw_sp_rif *r;
+ u32 dip;
+ int err;
+
+ if (n->tbl != &arp_tbl)
+ return 0;
+
+ dip = ntohl(*((__be32 *) n->primary_key));
+ neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &dip, sizeof(dip),
+ n->dev);
+ if (neigh_entry) {
+ WARN_ON(neigh_entry->n != n);
+ return 0;
+ }
+
+ r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
+ if (WARN_ON(!r))
+ return -EINVAL;
+
+ neigh_entry = mlxsw_sp_neigh_entry_create(&dip, sizeof(dip), n->dev,
+ r->rif, n);
+ if (!neigh_entry)
+ return -ENOMEM;
+ err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry);
+ if (err)
+ goto err_neigh_entry_insert;
+ return 0;
+
+err_neigh_entry_insert:
+ mlxsw_sp_neigh_entry_destroy(neigh_entry);
+ return err;
+}
+
+void mlxsw_sp_router_neigh_destroy(struct net_device *dev,
+ struct neighbour *n)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_neigh_entry *neigh_entry;
+ u32 dip;
+
+ if (n->tbl != &arp_tbl)
+ return;
+
+ dip = ntohl(*((__be32 *) n->primary_key));
+ neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &dip, sizeof(dip),
+ n->dev);
+ if (!neigh_entry)
+ return;
+ mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry);
+ mlxsw_sp_neigh_entry_destroy(neigh_entry);
+}
+
+static void
+mlxsw_sp_router_neighs_update_interval_init(struct mlxsw_sp *mlxsw_sp)
+{
+ unsigned long interval = NEIGH_VAR(&arp_tbl.parms, DELAY_PROBE_TIME);
+
+ mlxsw_sp->router.neighs_update.interval = jiffies_to_msecs(interval);
+}
+
+static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
+ char *rauhtd_pl,
+ int ent_index)
+{
+ struct net_device *dev;
+ struct neighbour *n;
+ __be32 dipn;
+ u32 dip;
+ u16 rif;
+
+ mlxsw_reg_rauhtd_ent_ipv4_unpack(rauhtd_pl, ent_index, &rif, &dip);
+
+ if (!mlxsw_sp->rifs[rif]) {
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect RIF in neighbour entry\n");
+ return;
+ }
+
+ dipn = htonl(dip);
+ dev = mlxsw_sp->rifs[rif]->dev;
+ n = neigh_lookup(&arp_tbl, &dipn, dev);
+ if (!n) {
+ netdev_err(dev, "Failed to find matching neighbour for IP=%pI4h\n",
+ &dip);
+ return;
+ }
+
+ netdev_dbg(dev, "Updating neighbour with IP=%pI4h\n", &dip);
+ neigh_event_send(n, NULL);
+ neigh_release(n);
+}
+
+static void mlxsw_sp_router_neigh_rec_ipv4_process(struct mlxsw_sp *mlxsw_sp,
+ char *rauhtd_pl,
+ int rec_index)
+{
+ u8 num_entries;
+ int i;
+
+ num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl,
+ rec_index);
+ /* Hardware starts counting at 0, so add 1. */
+ num_entries++;
+
+ /* Each record consists of several neighbour entries. */
+ for (i = 0; i < num_entries; i++) {
+ int ent_index;
+
+ ent_index = rec_index * MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC + i;
+ mlxsw_sp_router_neigh_ent_ipv4_process(mlxsw_sp, rauhtd_pl,
+ ent_index);
+ }
+}
+
+static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp,
+ char *rauhtd_pl, int rec_index)
+{
+ switch (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, rec_index)) {
+ case MLXSW_REG_RAUHTD_TYPE_IPV4:
+ mlxsw_sp_router_neigh_rec_ipv4_process(mlxsw_sp, rauhtd_pl,
+ rec_index);
+ break;
+ case MLXSW_REG_RAUHTD_TYPE_IPV6:
+ WARN_ON_ONCE(1);
+ break;
+ }
+}
+
+static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp)
+{
+ char *rauhtd_pl;
+ u8 num_rec;
+ int i, err;
+
+ rauhtd_pl = kmalloc(MLXSW_REG_RAUHTD_LEN, GFP_KERNEL);
+ if (!rauhtd_pl)
+ return -ENOMEM;
+
+ /* Make sure the neighbour's netdev isn't removed in the
+ * process.
+ */
+ rtnl_lock();
+ do {
+ mlxsw_reg_rauhtd_pack(rauhtd_pl, MLXSW_REG_RAUHTD_TYPE_IPV4);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(rauhtd),
+ rauhtd_pl);
+ if (err) {
+ dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to dump neighbour table\n");
+ break;
+ }
+ num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
+ for (i = 0; i < num_rec; i++)
+ mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl,
+ i);
+ } while (num_rec);
+ rtnl_unlock();
+
+ kfree(rauhtd_pl);
+ return err;
+}
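
The dump loop above re-issues the RAUHTD query until the hardware reports zero records, since one query returns at most a fixed number of activity records. A userspace sketch of the same drain-until-empty pattern against a mocked record source:

#include <stdio.h>

static int pending = 10; /* pretend 10 activity records are queued */

/* Mocked query: returns up to 4 records per call, like a fixed-size
 * dump buffer; 0 means nothing more to report.
 */
static int query_records(void)
{
    int n = pending < 4 ? pending : 4;

    pending -= n;
    return n;
}

int main(void)
{
    int num_rec, i;

    do {
        num_rec = query_records();
        for (i = 0; i < num_rec; i++)
            printf("process record %d of this batch\n", i);
    } while (num_rec);
    return 0;
}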
+
+static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp)
+{
+ struct mlxsw_sp_neigh_entry *neigh_entry;
+
+ /* Take RTNL mutex here to prevent the lists from changing */
+ rtnl_lock();
+ list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
+ nexthop_neighs_list_node) {
+ /* If this neigh has nexthops, make the kernel think it is
+ * active regardless of traffic.
+ */
+ if (!list_empty(&neigh_entry->nexthop_list))
+ neigh_event_send(neigh_entry->n, NULL);
+ }
+ rtnl_unlock();
+}
+
+static void
+mlxsw_sp_router_neighs_update_work_schedule(struct mlxsw_sp *mlxsw_sp)
+{
+ unsigned long interval = mlxsw_sp->router.neighs_update.interval;
+
+ mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw,
+ msecs_to_jiffies(interval));
+}
+
+static void mlxsw_sp_router_neighs_update_work(struct work_struct *work)
+{
+ struct mlxsw_sp *mlxsw_sp = container_of(work, struct mlxsw_sp,
+ router.neighs_update.dw.work);
+ int err;
+
+ err = mlxsw_sp_router_neighs_update_rauhtd(mlxsw_sp);
+ if (err)
+ dev_err(mlxsw_sp->bus_info->dev, "Could not update kernel for neigh activity\n");
+
+ mlxsw_sp_router_neighs_update_nh(mlxsw_sp);
+
+ mlxsw_sp_router_neighs_update_work_schedule(mlxsw_sp);
+}
+
+static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work)
+{
+ struct mlxsw_sp_neigh_entry *neigh_entry;
+ struct mlxsw_sp *mlxsw_sp = container_of(work, struct mlxsw_sp,
+ router.nexthop_probe_dw.work);
+
+ /* Iterate over nexthop neighbours, find those that are unresolved
+ * and send ARP requests for them. This solves the chicken-and-egg
+ * problem where a nexthop would not get offloaded until its
+ * neighbour is resolved, but the neighbour would never get resolved
+ * if traffic is flowing in HW via a different nexthop.
+ *
+ * Take RTNL mutex here to prevent the lists from changing.
+ */
+ rtnl_lock();
+ list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list,
+ nexthop_neighs_list_node) {
+ if (!(neigh_entry->n->nud_state & NUD_VALID) &&
+ !list_empty(&neigh_entry->nexthop_list))
+ neigh_event_send(neigh_entry->n, NULL);
+ }
+ rtnl_unlock();
+
+ mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw,
+ MLXSW_SP_UNRESOLVED_NH_PROBE_INTERVAL);
+}
+
+static void
+mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_neigh_entry *neigh_entry,
+ bool removing);
+
+static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work)
+{
+ struct mlxsw_sp_neigh_entry *neigh_entry =
+ container_of(work, struct mlxsw_sp_neigh_entry, dw.work);
+ struct neighbour *n = neigh_entry->n;
+ struct mlxsw_sp_port *mlxsw_sp_port = neigh_entry->mlxsw_sp_port;
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char rauht_pl[MLXSW_REG_RAUHT_LEN];
+ struct net_device *dev;
+ bool entry_connected;
+ u8 nud_state;
+ bool updating;
+ bool removing;
+ bool adding;
+ u32 dip;
+ int err;
+
+ read_lock_bh(&n->lock);
+ dip = ntohl(*((__be32 *) n->primary_key));
+ memcpy(neigh_entry->ha, n->ha, sizeof(neigh_entry->ha));
+ nud_state = n->nud_state;
+ dev = n->dev;
+ read_unlock_bh(&n->lock);
+
+ entry_connected = nud_state & NUD_VALID;
+ adding = (!neigh_entry->offloaded) && entry_connected;
+ updating = neigh_entry->offloaded && entry_connected;
+ removing = neigh_entry->offloaded && !entry_connected;
+
+ if (adding || updating) {
+ mlxsw_reg_rauht_pack4(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_ADD,
+ neigh_entry->rif,
+ neigh_entry->ha, dip);
+ err = mlxsw_reg_write(mlxsw_sp->core,
+ MLXSW_REG(rauht), rauht_pl);
+ if (err) {
+ netdev_err(dev, "Could not add neigh %pI4h\n", &dip);
+ neigh_entry->offloaded = false;
+ } else {
+ neigh_entry->offloaded = true;
+ }
+ mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, false);
+ } else if (removing) {
+ mlxsw_reg_rauht_pack4(rauht_pl, MLXSW_REG_RAUHT_OP_WRITE_DELETE,
+ neigh_entry->rif,
+ neigh_entry->ha, dip);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rauht),
+ rauht_pl);
+ if (err) {
+ netdev_err(dev, "Could not delete neigh %pI4h\n", &dip);
+ neigh_entry->offloaded = true;
+ } else {
+ neigh_entry->offloaded = false;
+ }
+ mlxsw_sp_nexthop_neigh_update(mlxsw_sp, neigh_entry, true);
+ }
+
+ neigh_release(n);
+ mlxsw_sp_port_dev_put(mlxsw_sp_port);
+}
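
The delayed work above reduces to a three-way decision on two bits of state: whether the entry is already offloaded and whether the kernel currently considers the neighbour valid. Adding and updating use the same write-add operation; only a valid-to-invalid transition issues a delete. A sketch of just that decision logic (enum names invented for illustration):

#include <stdio.h>

enum action { ACT_NONE, ACT_WRITE, ACT_DELETE };

/* offloaded: entry currently programmed in HW; connected: NUD_VALID. */
static enum action neigh_action(int offloaded, int connected)
{
    if (connected)
        return ACT_WRITE;   /* covers both adding and updating */
    if (offloaded)
        return ACT_DELETE;  /* valid -> invalid transition */
    return ACT_NONE;        /* not offloaded and not valid */
}

int main(void)
{
    printf("add:    %d\n", neigh_action(0, 1)); /* ACT_WRITE */
    printf("update: %d\n", neigh_action(1, 1)); /* ACT_WRITE */
    printf("remove: %d\n", neigh_action(1, 0)); /* ACT_DELETE */
    printf("idle:   %d\n", neigh_action(0, 0)); /* ACT_NONE */
    return 0;
}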
+
+int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct mlxsw_sp_neigh_entry *neigh_entry;
+ struct mlxsw_sp_port *mlxsw_sp_port;
+ struct mlxsw_sp *mlxsw_sp;
+ unsigned long interval;
+ struct net_device *dev;
+ struct neigh_parms *p;
+ struct neighbour *n;
+ u32 dip;
+
+ switch (event) {
+ case NETEVENT_DELAY_PROBE_TIME_UPDATE:
+ p = ptr;
+
+ /* We don't care about changes in the default table. */
+ if (!p->dev || p->tbl != &arp_tbl)
+ return NOTIFY_DONE;
+
+ /* We are in atomic context and can't take RTNL mutex,
+ * so use RCU variant to walk the device chain.
+ */
+ mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
+ if (!mlxsw_sp_port)
+ return NOTIFY_DONE;
+
+ mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
+ mlxsw_sp->router.neighs_update.interval = interval;
+
+ mlxsw_sp_port_dev_put(mlxsw_sp_port);
+ break;
+ case NETEVENT_NEIGH_UPDATE:
+ n = ptr;
+ dev = n->dev;
+
+ if (n->tbl != &arp_tbl)
+ return NOTIFY_DONE;
+
+ mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(dev);
+ if (!mlxsw_sp_port)
+ return NOTIFY_DONE;
+
+ mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ dip = ntohl(*((__be32 *) n->primary_key));
+ neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp,
+ &dip,
+ sizeof(__be32),
+ dev);
+ if (WARN_ON(!neigh_entry) || WARN_ON(neigh_entry->n != n)) {
+ mlxsw_sp_port_dev_put(mlxsw_sp_port);
+ return NOTIFY_DONE;
+ }
+ neigh_entry->mlxsw_sp_port = mlxsw_sp_port;
+
+ /* Take a reference to ensure the neighbour won't be
+ * destructed until we drop the reference in the
+ * delayed work.
+ */
+ neigh_clone(n);
+ if (!mlxsw_core_schedule_dw(&neigh_entry->dw, 0)) {
+ neigh_release(n);
+ mlxsw_sp_port_dev_put(mlxsw_sp_port);
+ }
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int err;
+
+ err = rhashtable_init(&mlxsw_sp->router.neigh_ht,
+ &mlxsw_sp_neigh_ht_params);
+ if (err)
+ return err;
+
+ /* Initialize the polling interval according to the default
+ * table.
+ */
+ mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
+
+ /* Create the delayed works for neighbour activity update and
+ * for probing unresolved nexthops.
+ */
+ INIT_DELAYED_WORK(&mlxsw_sp->router.neighs_update.dw,
+ mlxsw_sp_router_neighs_update_work);
+ INIT_DELAYED_WORK(&mlxsw_sp->router.nexthop_probe_dw,
+ mlxsw_sp_router_probe_unresolved_nexthops);
+ mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw, 0);
+ mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw, 0);
+ return 0;
+}
+
+static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ cancel_delayed_work_sync(&mlxsw_sp->router.neighs_update.dw);
+ cancel_delayed_work_sync(&mlxsw_sp->router.nexthop_probe_dw);
+ rhashtable_destroy(&mlxsw_sp->router.neigh_ht);
+}
+
+struct mlxsw_sp_nexthop {
+ struct list_head neigh_list_node; /* member of neigh entry list */
+ struct mlxsw_sp_nexthop_group *nh_grp; /* pointer back to the group
+ * this belongs to
+ */
+ u8 should_offload:1, /* set indicates this neigh is connected and
+ * should be put into the KVD linear area of
+ * this group.
+ */
+ offloaded:1, /* set in case the neigh is actually put into
+ * the KVD linear area of this group.
+ */
+ update:1; /* set indicates that the MAC of this neigh
+ * should be updated in HW
+ */
+ struct mlxsw_sp_neigh_entry *neigh_entry;
+};
+
+struct mlxsw_sp_nexthop_group {
+ struct list_head list; /* node in mlxsw->router.nexthop_group_list */
+ struct list_head fib_list; /* list of fib entries that use this group */
+ u8 adj_index_valid:1;
+ u32 adj_index;
+ u16 ecmp_size;
+ u16 count;
+ struct mlxsw_sp_nexthop nexthops[0];
+};
+
+static int mlxsw_sp_adj_index_mass_update_vr(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_vr *vr,
+ u32 adj_index, u16 ecmp_size,
+ u32 new_adj_index,
+ u16 new_ecmp_size)
+{
+ char raleu_pl[MLXSW_REG_RALEU_LEN];
+
+ mlxsw_reg_raleu_pack(raleu_pl, vr->proto, vr->id,
+ adj_index, ecmp_size,
+ new_adj_index, new_ecmp_size);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(raleu), raleu_pl);
+}
+
+static int mlxsw_sp_adj_index_mass_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp,
+ u32 old_adj_index, u16 old_ecmp_size)
+{
+ struct mlxsw_sp_fib_entry *fib_entry;
+ struct mlxsw_sp_vr *vr = NULL;
+ int err;
+
+ list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
+ if (vr == fib_entry->vr)
+ continue;
+ vr = fib_entry->vr;
+ err = mlxsw_sp_adj_index_mass_update_vr(mlxsw_sp, vr,
+ old_adj_index,
+ old_ecmp_size,
+ nh_grp->adj_index,
+ nh_grp->ecmp_size);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
+ struct mlxsw_sp_nexthop *nh)
+{
+ struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
+ char ratr_pl[MLXSW_REG_RATR_LEN];
+
+ mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY,
+ true, adj_index, neigh_entry->rif);
+ mlxsw_reg_ratr_eth_entry_pack(ratr_pl, neigh_entry->ha);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
+}
+
+static int
+mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ u32 adj_index = nh_grp->adj_index; /* base */
+ struct mlxsw_sp_nexthop *nh;
+ int i;
+ int err;
+
+ for (i = 0; i < nh_grp->count; i++) {
+ nh = &nh_grp->nexthops[i];
+
+ if (!nh->should_offload) {
+ nh->offloaded = 0;
+ continue;
+ }
+
+ if (nh->update) {
+ err = mlxsw_sp_nexthop_mac_update(mlxsw_sp,
+ adj_index, nh);
+ if (err)
+ return err;
+ nh->update = 0;
+ nh->offloaded = 1;
+ }
+ adj_index++;
+ }
+ return 0;
+}
+
+static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry);
+
+static int
+mlxsw_sp_nexthop_fib_entries_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ struct mlxsw_sp_fib_entry *fib_entry;
+ int err;
+
+ list_for_each_entry(fib_entry, &nh_grp->fib_list, nexthop_group_node) {
+ err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
+ if (err)
+ return err;
+ }
+ return 0;
+}
+
+static void
+mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ struct mlxsw_sp_nexthop *nh;
+ bool offload_change = false;
+ u32 adj_index;
+ u16 ecmp_size = 0;
+ bool old_adj_index_valid;
+ u32 old_adj_index;
+ u16 old_ecmp_size;
+ int ret;
+ int i;
+ int err;
+
+ for (i = 0; i < nh_grp->count; i++) {
+ nh = &nh_grp->nexthops[i];
+
+ if (nh->should_offload ^ nh->offloaded) {
+ offload_change = true;
+ if (nh->should_offload)
+ nh->update = 1;
+ }
+ if (nh->should_offload)
+ ecmp_size++;
+ }
+ if (!offload_change) {
+ /* Nothing was added or removed, so no need to reallocate. Just
+ * update MAC on existing adjacency indexes.
+ */
+ err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp);
+ if (err) {
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
+ goto set_trap;
+ }
+ return;
+ }
+ if (!ecmp_size)
+ /* No neigh of this group is connected, so just set
+ * the trap and let everything flow through the kernel.
+ */
+ goto set_trap;
+
+ ret = mlxsw_sp_kvdl_alloc(mlxsw_sp, ecmp_size);
+ if (ret < 0) {
+ /* We ran out of KVD linear space, just set the
+ * trap and let everything flow through the kernel.
+ */
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to allocate KVD linear area for nexthop group.\n");
+ goto set_trap;
+ }
+ adj_index = ret;
+ old_adj_index_valid = nh_grp->adj_index_valid;
+ old_adj_index = nh_grp->adj_index;
+ old_ecmp_size = nh_grp->ecmp_size;
+ nh_grp->adj_index_valid = 1;
+ nh_grp->adj_index = adj_index;
+ nh_grp->ecmp_size = ecmp_size;
+ err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp);
+ if (err) {
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
+ goto set_trap;
+ }
+
+ if (!old_adj_index_valid) {
+ /* The trap was set for fib entries, so we have to call
+ * fib entry update to unset it and use adjacency index.
+ */
+ err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
+ if (err) {
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to add adjacency index to fib entries.\n");
+ goto set_trap;
+ }
+ return;
+ }
+
+ err = mlxsw_sp_adj_index_mass_update(mlxsw_sp, nh_grp,
+ old_adj_index, old_ecmp_size);
+ mlxsw_sp_kvdl_free(mlxsw_sp, old_adj_index);
+ if (err) {
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to mass-update adjacency index for nexthop group.\n");
+ goto set_trap;
+ }
+ return;
+
+set_trap:
+ old_adj_index_valid = nh_grp->adj_index_valid;
+ nh_grp->adj_index_valid = 0;
+ for (i = 0; i < nh_grp->count; i++) {
+ nh = &nh_grp->nexthops[i];
+ nh->offloaded = 0;
+ }
+ err = mlxsw_sp_nexthop_fib_entries_update(mlxsw_sp, nh_grp);
+ if (err)
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to set traps for fib entries.\n");
+ if (old_adj_index_valid)
+ mlxsw_sp_kvdl_free(mlxsw_sp, nh_grp->adj_index);
+}
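
mlxsw_sp_nexthop_group_refresh() boils down to three paths: if group membership did not change, only rewrite MACs in place; if no nexthop is resolvable, trap the routes to the kernel; otherwise allocate a fresh adjacency block, program it, switch the routes over (a mass update when an old block exists), and free the old block. Any failure falls back to the trap path. A sketch of the path selection, with the enum names invented for illustration:

#include <stdio.h>

enum refresh_path {
    PATH_MAC_UPDATE_ONLY, /* membership unchanged, rewrite MACs in place */
    PATH_SET_TRAP,        /* nothing offloadable, punt routes to kernel */
    PATH_REALLOCATE,      /* carve a new adjacency block, then switch over */
};

static enum refresh_path pick_path(int offload_change, int ecmp_size)
{
    if (!offload_change)
        return PATH_MAC_UPDATE_ONLY;
    if (!ecmp_size)
        return PATH_SET_TRAP;
    return PATH_REALLOCATE;
}

int main(void)
{
    printf("%d\n", pick_path(0, 2)); /* PATH_MAC_UPDATE_ONLY */
    printf("%d\n", pick_path(1, 0)); /* PATH_SET_TRAP */
    printf("%d\n", pick_path(1, 3)); /* PATH_REALLOCATE: alloc, program,
                                      * mass-update routes, free old block */
    return 0;
}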
+
+static void __mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp_nexthop *nh,
+ bool removing)
+{
+ if (!removing && !nh->should_offload)
+ nh->should_offload = 1;
+ else if (removing && nh->offloaded)
+ nh->should_offload = 0;
+ nh->update = 1;
+}
+
+static void
+mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_neigh_entry *neigh_entry,
+ bool removing)
+{
+ struct mlxsw_sp_nexthop *nh;
+
+ /* Take RTNL mutex here to prevent the lists from changing */
+ rtnl_lock();
+ list_for_each_entry(nh, &neigh_entry->nexthop_list,
+ neigh_list_node) {
+ __mlxsw_sp_nexthop_neigh_update(nh, removing);
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nh_grp);
+ }
+ rtnl_unlock();
+}
+
+static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp,
+ struct mlxsw_sp_nexthop *nh,
+ struct fib_nh *fib_nh)
+{
+ struct mlxsw_sp_neigh_entry *neigh_entry;
+ u32 gwip = ntohl(fib_nh->nh_gw);
+ struct net_device *dev = fib_nh->nh_dev;
+ struct neighbour *n;
+ u8 nud_state;
+
+ neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &gwip,
+ sizeof(gwip), dev);
+ if (!neigh_entry) {
+ __be32 gwipn = htonl(gwip);
+
+ n = neigh_create(&arp_tbl, &gwipn, dev);
+ if (IS_ERR(n))
+ return PTR_ERR(n);
+ neigh_event_send(n, NULL);
+ neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &gwip,
+ sizeof(gwip), dev);
+ if (!neigh_entry) {
+ neigh_release(n);
+ return -EINVAL;
+ }
+ } else {
+ /* Take a reference on the neigh here, ensuring that it is
+ * not destructed before the nexthop entry is finished.
+ * The other branch takes its reference via neigh_create().
+ */
+ n = neigh_entry->n;
+ neigh_clone(n);
+ }
+
+ /* If that is the first nexthop connected to that neigh, add to
+ * nexthop_neighs_list
+ */
+ if (list_empty(&neigh_entry->nexthop_list))
+ list_add_tail(&neigh_entry->nexthop_neighs_list_node,
+ &mlxsw_sp->router.nexthop_neighs_list);
+
+ nh->nh_grp = nh_grp;
+ nh->neigh_entry = neigh_entry;
+ list_add_tail(&nh->neigh_list_node, &neigh_entry->nexthop_list);
+ read_lock_bh(&n->lock);
+ nud_state = n->nud_state;
+ read_unlock_bh(&n->lock);
+ __mlxsw_sp_nexthop_neigh_update(nh, !(nud_state & NUD_VALID));
+
+ return 0;
+}
+
+static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
+ struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
+
+ list_del(&nh->neigh_list_node);
+
+ /* If that is the last nexthop connected to that neigh, remove from
+ * nexthop_neighs_list
+ */
+ if (list_empty(&nh->neigh_entry->nexthop_list))
+ list_del(&nh->neigh_entry->nexthop_neighs_list_node);
+
+ neigh_release(neigh_entry->n);
+}
+
+static struct mlxsw_sp_nexthop_group *
+mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp;
+ struct mlxsw_sp_nexthop *nh;
+ struct fib_nh *fib_nh;
+ size_t alloc_size;
+ int i;
+ int err;
+
+ alloc_size = sizeof(*nh_grp) +
+ fi->fib_nhs * sizeof(struct mlxsw_sp_nexthop);
+ nh_grp = kzalloc(alloc_size, GFP_KERNEL);
+ if (!nh_grp)
+ return ERR_PTR(-ENOMEM);
+ INIT_LIST_HEAD(&nh_grp->fib_list);
+ nh_grp->count = fi->fib_nhs;
+ for (i = 0; i < nh_grp->count; i++) {
+ nh = &nh_grp->nexthops[i];
+ fib_nh = &fi->fib_nh[i];
+ err = mlxsw_sp_nexthop_init(mlxsw_sp, nh_grp, nh, fib_nh);
+ if (err)
+ goto err_nexthop_init;
+ }
+ list_add_tail(&nh_grp->list, &mlxsw_sp->router.nexthop_group_list);
+ mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
+ return nh_grp;
+
+err_nexthop_init:
+ for (i--; i >= 0; i--)
+ mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
+ kfree(nh_grp);
+ return ERR_PTR(err);
+}
+
+static void
+mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop_group *nh_grp)
+{
+ struct mlxsw_sp_nexthop *nh;
+ int i;
+
+ list_del(&nh_grp->list);
+ for (i = 0; i < nh_grp->count; i++) {
+ nh = &nh_grp->nexthops[i];
+ mlxsw_sp_nexthop_fini(mlxsw_sp, nh);
+ }
+ kfree(nh_grp);
+}
+
+static bool mlxsw_sp_nexthop_match(struct mlxsw_sp_nexthop *nh,
+ struct fib_info *fi)
+{
+ int i;
+
+ for (i = 0; i < fi->fib_nhs; i++) {
+ struct fib_nh *fib_nh = &fi->fib_nh[i];
+ u32 gwip = ntohl(fib_nh->nh_gw);
+
+ if (memcmp(nh->neigh_entry->key.addr,
+ &gwip, sizeof(u32)) == 0 &&
+ nh->neigh_entry->key.dev == fib_nh->nh_dev)
+ return true;
+ }
+ return false;
+}
+
+static bool mlxsw_sp_nexthop_group_match(struct mlxsw_sp_nexthop_group *nh_grp,
+ struct fib_info *fi)
+{
+ int i;
+
+ if (nh_grp->count != fi->fib_nhs)
+ return false;
+ for (i = 0; i < nh_grp->count; i++) {
+ struct mlxsw_sp_nexthop *nh = &nh_grp->nexthops[i];
+
+ if (!mlxsw_sp_nexthop_match(nh, fi))
+ return false;
+ }
+ return true;
+}
+
+static struct mlxsw_sp_nexthop_group *
+mlxsw_sp_nexthop_group_find(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp;
+
+ list_for_each_entry(nh_grp, &mlxsw_sp->router.nexthop_group_list,
+ list) {
+ if (mlxsw_sp_nexthop_group_match(nh_grp, fi))
+ return nh_grp;
+ }
+ return NULL;
+}
+
+static int mlxsw_sp_nexthop_group_get(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ struct fib_info *fi)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp;
+
+ nh_grp = mlxsw_sp_nexthop_group_find(mlxsw_sp, fi);
+ if (!nh_grp) {
+ nh_grp = mlxsw_sp_nexthop_group_create(mlxsw_sp, fi);
+ if (IS_ERR(nh_grp))
+ return PTR_ERR(nh_grp);
+ }
+ list_add_tail(&fib_entry->nexthop_group_node, &nh_grp->fib_list);
+ fib_entry->nh_group = nh_grp;
+ return 0;
+}
+
+static void mlxsw_sp_nexthop_group_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ struct mlxsw_sp_nexthop_group *nh_grp = fib_entry->nh_group;
+
+ list_del(&fib_entry->nexthop_group_node);
+ if (!list_empty(&nh_grp->fib_list))
+ return;
+ mlxsw_sp_nexthop_group_destroy(mlxsw_sp, nh_grp);
+}
+
+static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
+{
+ char rgcr_pl[MLXSW_REG_RGCR_LEN];
+
+ mlxsw_reg_rgcr_pack(rgcr_pl, true);
+ mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, MLXSW_SP_RIF_MAX);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
+}
+
+static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ char rgcr_pl[MLXSW_REG_RGCR_LEN];
+
+ mlxsw_reg_rgcr_pack(rgcr_pl, false);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
+}
+
+int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int err;
+
+ INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_neighs_list);
+ INIT_LIST_HEAD(&mlxsw_sp->router.nexthop_group_list);
+ err = __mlxsw_sp_router_init(mlxsw_sp);
+ if (err)
+ return err;
+ mlxsw_sp_lpm_init(mlxsw_sp);
+ mlxsw_sp_vrs_init(mlxsw_sp);
+ err = mlxsw_sp_neigh_init(mlxsw_sp);
+ if (err)
+ goto err_neigh_init;
+ return 0;
+
+err_neigh_init:
+ __mlxsw_sp_router_fini(mlxsw_sp);
+ return err;
+}
+
+void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp_neigh_fini(mlxsw_sp);
+ __mlxsw_sp_router_fini(mlxsw_sp);
+}
+
+static int mlxsw_sp_fib_entry_op4_remote(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
+{
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
+ u32 *p_dip = (u32 *) fib_entry->key.addr;
+ struct mlxsw_sp_vr *vr = fib_entry->vr;
+ enum mlxsw_reg_ralue_trap_action trap_action;
+ u16 trap_id = 0;
+ u32 adjacency_index = 0;
+ u16 ecmp_size = 0;
+
+ /* In case the nexthop group adjacency index is valid, use it
+ * with the provided ECMP size. Otherwise, set up a trap and pass
+ * traffic to the kernel.
+ */
+ if (fib_entry->nh_group->adj_index_valid) {
+ trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
+ adjacency_index = fib_entry->nh_group->adj_index;
+ ecmp_size = fib_entry->nh_group->ecmp_size;
+ } else {
+ trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
+ trap_id = MLXSW_TRAP_ID_RTR_INGRESS0;
+ }
+
+ mlxsw_reg_ralue_pack4(ralue_pl, vr->proto, op, vr->id,
+ fib_entry->key.prefix_len, *p_dip);
+ mlxsw_reg_ralue_act_remote_pack(ralue_pl, trap_action, trap_id,
+ adjacency_index, ecmp_size);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+}
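
A remote route is thus programmed either with a real adjacency (base index plus ECMP size) or, while its nexthop group has no valid adjacency block, with a trap action that lifts packets to the kernel. A sketch of filling those two action variants (struct and field names invented for illustration):

#include <stdio.h>

struct route_action {
    int trap;            /* 1: punt to kernel, 0: forward via adjacency */
    unsigned adj_index;
    unsigned ecmp_size;
};

static struct route_action make_action(int adj_valid, unsigned adj_index,
                                       unsigned ecmp_size)
{
    struct route_action act = { 0 };

    if (adj_valid) {
        act.adj_index = adj_index;
        act.ecmp_size = ecmp_size;
    } else {
        act.trap = 1;    /* group not offloaded yet, or fell back */
    }
    return act;
}

int main(void)
{
    struct route_action a = make_action(1, 1024, 4);
    struct route_action b = make_action(0, 0, 0);

    printf("a: trap=%d adj=%u ecmp=%u\n", a.trap, a.adj_index, a.ecmp_size);
    printf("b: trap=%d\n", b.trap);
    return 0;
}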
+
+static int mlxsw_sp_fib_entry_op4_local(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
+{
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
+ u32 *p_dip = (u32 *) fib_entry->key.addr;
+ struct mlxsw_sp_vr *vr = fib_entry->vr;
+
+ mlxsw_reg_ralue_pack4(ralue_pl, vr->proto, op, vr->id,
+ fib_entry->key.prefix_len, *p_dip);
+ mlxsw_reg_ralue_act_local_pack(ralue_pl,
+ MLXSW_REG_RALUE_TRAP_ACTION_NOP, 0,
+ fib_entry->rif);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+}
+
+static int mlxsw_sp_fib_entry_op4_trap(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
+{
+ char ralue_pl[MLXSW_REG_RALUE_LEN];
+ u32 *p_dip = (u32 *) fib_entry->key.addr;
+ struct mlxsw_sp_vr *vr = fib_entry->vr;
+
+ mlxsw_reg_ralue_pack4(ralue_pl, vr->proto, op, vr->id,
+ fib_entry->key.prefix_len, *p_dip);
+ mlxsw_reg_ralue_act_ip2me_pack(ralue_pl);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl);
+}
+
+static int mlxsw_sp_fib_entry_op4(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
+{
+ switch (fib_entry->type) {
+ case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
+ return mlxsw_sp_fib_entry_op4_remote(mlxsw_sp, fib_entry, op);
+ case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
+ return mlxsw_sp_fib_entry_op4_local(mlxsw_sp, fib_entry, op);
+ case MLXSW_SP_FIB_ENTRY_TYPE_TRAP:
+ return mlxsw_sp_fib_entry_op4_trap(mlxsw_sp, fib_entry, op);
+ }
+ return -EINVAL;
+}
+
+static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ enum mlxsw_reg_ralue_op op)
+{
+ switch (fib_entry->vr->proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ return mlxsw_sp_fib_entry_op4(mlxsw_sp, fib_entry, op);
+ case MLXSW_SP_L3_PROTO_IPV6:
+ return -EINVAL;
+ }
+ return -EINVAL;
+}
+
+static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
+ MLXSW_REG_RALUE_OP_WRITE_WRITE);
+}
+
+static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
+ MLXSW_REG_RALUE_OP_WRITE_DELETE);
+}
+
+struct mlxsw_sp_router_fib4_add_info {
+ struct switchdev_trans_item tritem;
+ struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_fib_entry *fib_entry;
+};
+
+static void mlxsw_sp_router_fib4_add_info_destroy(const void *data)
+{
+ const struct mlxsw_sp_router_fib4_add_info *info = data;
+ struct mlxsw_sp_fib_entry *fib_entry = info->fib_entry;
+ struct mlxsw_sp *mlxsw_sp = info->mlxsw_sp;
+ struct mlxsw_sp_vr *vr = fib_entry->vr;
+
+ mlxsw_sp_fib_entry_destroy(fib_entry);
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
+ kfree(info);
+}
+
+static int
+mlxsw_sp_router_fib4_entry_init(struct mlxsw_sp *mlxsw_sp,
+ const struct switchdev_obj_ipv4_fib *fib4,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ struct fib_info *fi = fib4->fi;
+
+ if (fib4->type == RTN_LOCAL || fib4->type == RTN_BROADCAST) {
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
+ return 0;
+ }
+ if (fib4->type != RTN_UNICAST)
+ return -EINVAL;
+
+ if (fi->fib_scope != RT_SCOPE_UNIVERSE) {
+ struct mlxsw_sp_rif *r;
+
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
+ r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, fi->fib_dev);
+ if (!r)
+ return -EINVAL;
+ fib_entry->rif = r->rif;
+ return 0;
+ }
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
+ return mlxsw_sp_nexthop_group_get(mlxsw_sp, fib_entry, fi);
+}
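
Editor's note: mlxsw_sp_router_fib4_entry_init() above classifies a route
three ways: RTN_LOCAL and RTN_BROADCAST become TRAP entries (packets go to
the CPU), unicast routes with a non-universe scope become LOCAL entries
(directly connected, egress via a RIF), and the remaining unicast routes
become REMOTE entries (forwarded via a nexthop group). The same mapping
restated as a runnable sketch with stand-in enum values:

#include <stdio.h>

/* Stand-ins for RTN_* and RT_SCOPE_*, values are illustrative only */
enum rtype { RT_UNICAST, RT_LOCAL, RT_BROADCAST, RT_OTHER };
enum rscope { SCOPE_UNIVERSE, SCOPE_LINK };
enum entry_type { ENTRY_TRAP, ENTRY_LOCAL, ENTRY_REMOTE, ENTRY_UNSUPPORTED };

static enum entry_type classify(enum rtype type, enum rscope scope)
{
	if (type == RT_LOCAL || type == RT_BROADCAST)
		return ENTRY_TRAP;		/* deliver to the CPU */
	if (type != RT_UNICAST)
		return ENTRY_UNSUPPORTED;	/* the driver returns -EINVAL */
	if (scope != SCOPE_UNIVERSE)
		return ENTRY_LOCAL;		/* directly connected, via RIF */
	return ENTRY_REMOTE;			/* needs a nexthop group */
}

int main(void)
{
	printf("%d %d %d\n",
	       classify(RT_LOCAL, SCOPE_UNIVERSE),	/* 0: TRAP */
	       classify(RT_UNICAST, SCOPE_LINK),	/* 1: LOCAL */
	       classify(RT_UNICAST, SCOPE_UNIVERSE));	/* 2: REMOTE */
	return 0;
}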
+
+static void
+mlxsw_sp_router_fib4_entry_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ if (fib_entry->type != MLXSW_SP_FIB_ENTRY_TYPE_REMOTE)
+ return;
+ mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
+}
+
+static struct mlxsw_sp_fib_entry *
+mlxsw_sp_fib_entry_get(struct mlxsw_sp *mlxsw_sp,
+ const struct switchdev_obj_ipv4_fib *fib4)
+{
+ struct mlxsw_sp_fib_entry *fib_entry;
+ struct fib_info *fi = fib4->fi;
+ struct mlxsw_sp_vr *vr;
+ int err;
+
+ vr = mlxsw_sp_vr_get(mlxsw_sp, fib4->dst_len, fib4->tb_id,
+ MLXSW_SP_L3_PROTO_IPV4);
+ if (IS_ERR(vr))
+ return ERR_CAST(vr);
+
+ fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst,
+ sizeof(fib4->dst),
+ fib4->dst_len, fi->fib_dev);
+ if (fib_entry) {
+ /* Already exists, just take a reference */
+ fib_entry->ref_count++;
+ return fib_entry;
+ }
+ fib_entry = mlxsw_sp_fib_entry_create(vr->fib, &fib4->dst,
+ sizeof(fib4->dst),
+ fib4->dst_len, fi->fib_dev);
+ if (!fib_entry) {
+ err = -ENOMEM;
+ goto err_fib_entry_create;
+ }
+ fib_entry->vr = vr;
+ fib_entry->ref_count = 1;
+
+ err = mlxsw_sp_router_fib4_entry_init(mlxsw_sp, fib4, fib_entry);
+ if (err)
+ goto err_fib4_entry_init;
+
+ return fib_entry;
+
+err_fib4_entry_init:
+ mlxsw_sp_fib_entry_destroy(fib_entry);
+err_fib_entry_create:
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
+
+ return ERR_PTR(err);
+}
+
+static struct mlxsw_sp_fib_entry *
+mlxsw_sp_fib_entry_find(struct mlxsw_sp *mlxsw_sp,
+ const struct switchdev_obj_ipv4_fib *fib4)
+{
+ struct mlxsw_sp_vr *vr;
+
+ vr = mlxsw_sp_vr_find(mlxsw_sp, fib4->tb_id, MLXSW_SP_L3_PROTO_IPV4);
+ if (!vr)
+ return NULL;
+
+ return mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst,
+ sizeof(fib4->dst), fib4->dst_len,
+ fib4->fi->fib_dev);
+}
+
+void mlxsw_sp_fib_entry_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ struct mlxsw_sp_vr *vr = fib_entry->vr;
+
+ if (--fib_entry->ref_count == 0) {
+ mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
+ mlxsw_sp_fib_entry_destroy(fib_entry);
+ }
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
+}
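
Editor's note: mlxsw_sp_fib_entry_get() and mlxsw_sp_fib_entry_put() follow
the usual lookup-or-create refcount idiom: the first get creates the entry
and takes a VR reference, subsequent gets only bump ref_count, and the final
put tears everything down. A generic user-space sketch of the idiom
(simplified linked-list cache, illustrative names):

#include <stdlib.h>

struct obj {
	int key;
	unsigned int ref_count;
	struct obj *next;
};

static struct obj *cache;

static struct obj *obj_get(int key)
{
	struct obj *o;

	for (o = cache; o; o = o->next)
		if (o->key == key) {
			o->ref_count++;		/* exists, just take a ref */
			return o;
		}

	o = calloc(1, sizeof(*o));
	if (!o)
		return NULL;
	o->key = key;
	o->ref_count = 1;
	o->next = cache;
	cache = o;
	return o;
}

static void obj_put(struct obj *obj)
{
	struct obj **p;

	if (--obj->ref_count)
		return;			/* still referenced elsewhere */
	for (p = &cache; *p; p = &(*p)->next)
		if (*p == obj) {
			*p = obj->next;	/* last ref gone: unlink and free */
			break;
		}
	free(obj);
}

int main(void)
{
	struct obj *a = obj_get(1);	/* created, ref_count == 1 */
	struct obj *b = obj_get(1);	/* same object, ref_count == 2 */

	obj_put(b);
	obj_put(a);			/* destroyed here */
	return 0;
}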
+
+static int
+mlxsw_sp_router_fib4_add_prepare(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct switchdev_obj_ipv4_fib *fib4,
+ struct switchdev_trans *trans)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_router_fib4_add_info *info;
+ struct mlxsw_sp_fib_entry *fib_entry;
+ int err;
+
+ fib_entry = mlxsw_sp_fib_entry_get(mlxsw_sp, fib4);
+ if (IS_ERR(fib_entry))
+ return PTR_ERR(fib_entry);
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ err = -ENOMEM;
+ goto err_alloc_info;
+ }
+ info->mlxsw_sp = mlxsw_sp;
+ info->fib_entry = fib_entry;
+ switchdev_trans_item_enqueue(trans, info,
+ mlxsw_sp_router_fib4_add_info_destroy,
+ &info->tritem);
+ return 0;
+
+err_alloc_info:
+ mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
+ return err;
+}
+
+static int
+mlxsw_sp_router_fib4_add_commit(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct switchdev_obj_ipv4_fib *fib4,
+ struct switchdev_trans *trans)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_router_fib4_add_info *info;
+ struct mlxsw_sp_fib_entry *fib_entry;
+ struct mlxsw_sp_vr *vr;
+ int err;
+
+ info = switchdev_trans_item_dequeue(trans);
+ fib_entry = info->fib_entry;
+ kfree(info);
+
+ if (fib_entry->ref_count != 1)
+ return 0;
+
+ vr = fib_entry->vr;
+ err = mlxsw_sp_fib_entry_insert(vr->fib, fib_entry);
+ if (err)
+ goto err_fib_entry_insert;
+ err = mlxsw_sp_fib_entry_update(mlxsw_sp_port->mlxsw_sp, fib_entry);
+ if (err)
+ goto err_fib_entry_add;
+ return 0;
+
+err_fib_entry_add:
+ mlxsw_sp_fib_entry_remove(vr->fib, fib_entry);
+err_fib_entry_insert:
+ mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
+ return err;
+}
+
+int mlxsw_sp_router_fib4_add(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct switchdev_obj_ipv4_fib *fib4,
+ struct switchdev_trans *trans)
+{
+ if (switchdev_trans_ph_prepare(trans))
+ return mlxsw_sp_router_fib4_add_prepare(mlxsw_sp_port,
+ fib4, trans);
+ return mlxsw_sp_router_fib4_add_commit(mlxsw_sp_port,
+ fib4, trans);
+}
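
Editor's note: mlxsw_sp_router_fib4_add() splits the work along the
switchdev transaction model: the prepare phase does everything that can fail
(entry lookup/creation, the info allocation) and parks the result on the
transaction, while the commit phase only applies what prepare set aside. A
toy model of that contract, with made-up names:

#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>

struct trans {
	bool ph_prepare;
	void *item;
};

static int obj_add(struct trans *t)
{
	if (t->ph_prepare) {
		/* prepare: everything that can fail happens here and the
		 * result is parked on the transaction */
		t->item = malloc(64);
		return t->item ? 0 : -ENOMEM;
	}
	/* commit: may not fail, only applies what prepare set aside */
	free(t->item);
	return 0;
}

int main(void)
{
	struct trans t = { .ph_prepare = true };

	if (obj_add(&t))		/* phase 1: prepare (may fail) */
		return 1;
	t.ph_prepare = false;
	return obj_add(&t);		/* phase 2: commit (must succeed) */
}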
+
+int mlxsw_sp_router_fib4_del(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct switchdev_obj_ipv4_fib *fib4)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_fib_entry *fib_entry;
+
+ fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fib4);
+ if (!fib_entry) {
+ dev_warn(mlxsw_sp->bus_info->dev, "Failed to find FIB4 entry being removed.\n");
+ return -ENOENT;
+ }
+
+ if (fib_entry->ref_count == 1) {
+ mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
+ mlxsw_sp_fib_entry_remove(fib_entry->vr->fib, fib_entry);
+ }
+
+ mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 9cd6f472234a..7b654c517b91 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -55,13 +55,10 @@
static u16 mlxsw_sp_port_vid_to_fid_get(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid)
{
+ struct mlxsw_sp_fid *f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
u16 fid = vid;
- if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
- u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);
-
- fid = mlxsw_sp_vfid_to_fid(vfid);
- }
+ fid = f ? f->fid : fid;
if (!fid)
fid = mlxsw_sp_port->pvid;
@@ -169,14 +166,9 @@ static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_sp_port_stp_state_set(mlxsw_sp_port, state);
}
-static bool mlxsw_sp_vfid_is_vport_br(u16 vfid)
-{
- return vfid >= MLXSW_SP_VFID_PORT_MAX;
-}
-
static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 idx_begin, u16 idx_end, bool set,
- bool only_uc)
+ u16 idx_begin, u16 idx_end, bool uc_set,
+ bool bm_set)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u16 local_port = mlxsw_sp_port->local_port;
@@ -185,43 +177,32 @@ static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
char *sftr_pl;
int err;
- if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
+ if (mlxsw_sp_port_is_vport(mlxsw_sp_port))
table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
- if (mlxsw_sp_vfid_is_vport_br(idx_begin))
- local_port = mlxsw_sp_port->local_port;
- else
- local_port = MLXSW_PORT_CPU_PORT;
- } else {
+ else
table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
- }
sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
if (!sftr_pl)
return -ENOMEM;
mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
- table_type, range, local_port, set);
+ table_type, range, local_port, uc_set);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
if (err)
goto buffer_out;
- /* Flooding control allows one to decide whether a given port will
- * flood unicast traffic for which there is no FDB entry.
- */
- if (only_uc)
- goto buffer_out;
-
mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin,
- table_type, range, local_port, set);
+ table_type, range, local_port, bm_set);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
if (err)
goto err_flood_bm_set;
- else
- goto buffer_out;
+
+ goto buffer_out;
err_flood_bm_set:
mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
- table_type, range, local_port, !set);
+ table_type, range, local_port, !uc_set);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
buffer_out:
kfree(sftr_pl);
@@ -236,7 +217,8 @@ static int mlxsw_sp_port_uc_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
int err;
if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
- u16 vfid = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);
+ u16 fid = mlxsw_sp_vport_fid_get(mlxsw_sp_port)->fid;
+ u16 vfid = mlxsw_sp_fid_to_vfid(fid);
return __mlxsw_sp_port_flood_set(mlxsw_sp_port, vfid, vfid,
set, true);
@@ -260,14 +242,16 @@ err_port_flood_set:
return err;
}
-int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid,
- bool set, bool only_uc)
+int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
+ bool set)
{
+ u16 vfid;
+
/* In case of vFIDs, index into the flooding table is relative to
* the start of the vFIDs range.
*/
- return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set,
- only_uc);
+ vfid = mlxsw_sp_fid_to_vfid(fid);
+ return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, set);
}
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -383,6 +367,192 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev,
return err;
}
+static int mlxsw_sp_fid_op(struct mlxsw_sp *mlxsw_sp, u16 fid, bool create)
+{
+ char sfmr_pl[MLXSW_REG_SFMR_LEN];
+
+ mlxsw_reg_sfmr_pack(sfmr_pl, !create, fid, fid);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+}
+
+static int mlxsw_sp_fid_map(struct mlxsw_sp *mlxsw_sp, u16 fid, bool valid)
+{
+ enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
+ char svfa_pl[MLXSW_REG_SVFA_LEN];
+
+ mlxsw_reg_svfa_pack(svfa_pl, 0, mt, valid, fid, fid);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svfa), svfa_pl);
+}
+
+static struct mlxsw_sp_fid *mlxsw_sp_fid_alloc(u16 fid)
+{
+ struct mlxsw_sp_fid *f;
+
+ f = kzalloc(sizeof(*f), GFP_KERNEL);
+ if (!f)
+ return NULL;
+
+ f->fid = fid;
+
+ return f;
+}
+
+struct mlxsw_sp_fid *mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
+{
+ struct mlxsw_sp_fid *f;
+ int err;
+
+ err = mlxsw_sp_fid_op(mlxsw_sp, fid, true);
+ if (err)
+ return ERR_PTR(err);
+
+ /* Although all the ports that are members of the FID might be using
+ * a {Port, VID} to FID mapping, we create a global VID-to-FID mapping
+ * as well. This allows a port to transition to VLAN mode, knowing the
+ * global mapping already exists.
+ */
+ err = mlxsw_sp_fid_map(mlxsw_sp, fid, true);
+ if (err)
+ goto err_fid_map;
+
+ f = mlxsw_sp_fid_alloc(fid);
+ if (!f) {
+ err = -ENOMEM;
+ goto err_allocate_fid;
+ }
+
+ list_add(&f->list, &mlxsw_sp->fids);
+
+ return f;
+
+err_allocate_fid:
+ mlxsw_sp_fid_map(mlxsw_sp, fid, false);
+err_fid_map:
+ mlxsw_sp_fid_op(mlxsw_sp, fid, false);
+ return ERR_PTR(err);
+}
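
Editor's note: mlxsw_sp_fid_create() above is shaped like the kernel's
standard goto-unwind ladder: each error label undoes, in reverse order,
exactly the steps that had already succeeded. The skeleton, reduced to a
runnable sketch with placeholder steps:

#include <stdio.h>

static int step_a(void) { return 0; }		/* e.g. SFMR: create FID */
static int step_b(void) { return 0; }		/* e.g. SVFA: map VID->FID */
static int step_c(void) { return -1; }		/* e.g. kzalloc; fails here */
static void undo_b(void) { puts("unmap"); }
static void undo_a(void) { puts("destroy"); }

static int create(void)
{
	int err;

	err = step_a();
	if (err)
		return err;
	err = step_b();
	if (err)
		goto err_step_b;
	err = step_c();
	if (err)
		goto err_step_c;
	return 0;

err_step_c:
	undo_b();	/* labels undo, in reverse, only what succeeded */
err_step_b:
	undo_a();
	return err;
}

int main(void)
{
	return create() ? 0 : 1;
}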
+
+void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f)
+{
+ u16 fid = f->fid;
+
+ list_del(&f->list);
+
+ if (f->r)
+ mlxsw_sp_rif_bridge_destroy(mlxsw_sp, f->r);
+
+ kfree(f);
+
+ mlxsw_sp_fid_map(mlxsw_sp, fid, false);
+
+ mlxsw_sp_fid_op(mlxsw_sp, fid, false);
+}
+
+static int __mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 fid)
+{
+ struct mlxsw_sp_fid *f;
+
+ if (test_bit(fid, mlxsw_sp_port->active_vlans))
+ return 0;
+
+ f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
+ if (!f) {
+ f = mlxsw_sp_fid_create(mlxsw_sp_port->mlxsw_sp, fid);
+ if (IS_ERR(f))
+ return PTR_ERR(f);
+ }
+
+ f->ref_count++;
+
+ netdev_dbg(mlxsw_sp_port->dev, "Joined FID=%d\n", fid);
+
+ return 0;
+}
+
+static void __mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 fid)
+{
+ struct mlxsw_sp_fid *f;
+
+ f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
+ if (WARN_ON(!f))
+ return;
+
+ netdev_dbg(mlxsw_sp_port->dev, "Left FID=%d\n", fid);
+
+ mlxsw_sp_port_fdb_flush(mlxsw_sp_port, fid);
+
+ if (--f->ref_count == 0)
+ mlxsw_sp_fid_destroy(mlxsw_sp_port->mlxsw_sp, f);
+}
+
+static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid,
+ bool valid)
+{
+ enum mlxsw_reg_svfa_mt mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
+
+ /* If the port doesn't have any vPorts, then it can use the global
+ * VID-to-FID mapping.
+ */
+ if (list_empty(&mlxsw_sp_port->vports_list))
+ return 0;
+
+ return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, valid, fid, fid);
+}
+
+static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 fid_begin, u16 fid_end)
+{
+ int fid, err;
+
+ for (fid = fid_begin; fid <= fid_end; fid++) {
+ err = __mlxsw_sp_port_fid_join(mlxsw_sp_port, fid);
+ if (err)
+ goto err_port_fid_join;
+ }
+
+ err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end,
+ mlxsw_sp_port->uc_flood, true);
+ if (err)
+ goto err_port_flood_set;
+
+ for (fid = fid_begin; fid <= fid_end; fid++) {
+ err = mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, true);
+ if (err)
+ goto err_port_fid_map;
+ }
+
+ return 0;
+
+err_port_fid_map:
+ for (fid--; fid >= fid_begin; fid--)
+ mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);
+ __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
+ false);
+err_port_flood_set:
+ fid = fid_end;
+err_port_fid_join:
+ for (fid--; fid >= fid_begin; fid--)
+ __mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
+ return err;
+}
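
Editor's note: the error path of mlxsw_sp_port_fid_join() shows the trickier
loop variant of the unwind idiom: when an iteration fails, only the
iterations that completed are rolled back (hence the fid-- before the
backwards loop, and the fid = fid_end reset so the earlier label unwinds the
whole range). A minimal sketch of the pattern:

#include <stdio.h>

static int do_one(int i)    { return i == 3 ? -1 : 0; }	/* fail on i == 3 */
static void undo_one(int i) { printf("undo %d\n", i); }

static int do_range(int begin, int end)
{
	int i, err;

	for (i = begin; i <= end; i++) {
		err = do_one(i);
		if (err)
			goto err_do_one;
	}
	return 0;

err_do_one:
	/* iteration i failed; roll back only [begin, i) in reverse order */
	for (i--; i >= begin; i--)
		undo_one(i);
	return err;
}

int main(void)
{
	return do_range(0, 5) ? 0 : 1;	/* fails, prints undo 2, 1, 0 */
}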
+
+static void mlxsw_sp_port_fid_leave(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 fid_begin, u16 fid_end)
+{
+ int fid;
+
+ for (fid = fid_begin; fid <= fid_end; fid++)
+ mlxsw_sp_port_fid_map(mlxsw_sp_port, fid, false);
+
+ __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end, false,
+ false);
+
+ for (fid = fid_begin; fid <= fid_end; fid++)
+ __mlxsw_sp_port_fid_leave(mlxsw_sp_port, fid);
+}
+
static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid)
{
@@ -440,74 +610,6 @@ err_port_allow_untagged_set:
return err;
}
-static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
-{
- char sfmr_pl[MLXSW_REG_SFMR_LEN];
- int err;
-
- mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, fid);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
-
- if (err)
- return err;
-
- set_bit(fid, mlxsw_sp->active_fids);
- return 0;
-}
-
-static void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, u16 fid)
-{
- char sfmr_pl[MLXSW_REG_SFMR_LEN];
-
- clear_bit(fid, mlxsw_sp->active_fids);
-
- mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
- fid, fid);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
-}
-
-static int mlxsw_sp_port_fid_map(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
-{
- enum mlxsw_reg_svfa_mt mt;
-
- if (!list_empty(&mlxsw_sp_port->vports_list))
- mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
- else
- mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
-
- return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, true, fid, fid);
-}
-
-static int mlxsw_sp_port_fid_unmap(struct mlxsw_sp_port *mlxsw_sp_port, u16 fid)
-{
- enum mlxsw_reg_svfa_mt mt;
-
- if (list_empty(&mlxsw_sp_port->vports_list))
- return 0;
-
- mt = MLXSW_REG_SVFA_MT_PORT_VID_TO_FID;
- return mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt, false, fid, fid);
-}
-
-static int mlxsw_sp_port_add_vids(struct net_device *dev, u16 vid_begin,
- u16 vid_end)
-{
- u16 vid;
- int err;
-
- for (vid = vid_begin; vid <= vid_end; vid++) {
- err = mlxsw_sp_port_add_vid(dev, 0, vid);
- if (err)
- goto err_port_add_vid;
- }
- return 0;
-
-err_port_add_vid:
- for (vid--; vid >= vid_begin; vid--)
- mlxsw_sp_port_kill_vid(dev, 0, vid);
- return err;
-}
-
static int __mlxsw_sp_port_vlans_set(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid_begin, u16 vid_end, bool is_member,
bool untagged)
@@ -533,57 +635,17 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid_begin, u16 vid_end,
bool flag_untagged, bool flag_pvid)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct net_device *dev = mlxsw_sp_port->dev;
- u16 vid, last_visited_vid, old_pvid;
- enum mlxsw_reg_svfa_mt mt;
+ u16 vid, old_pvid;
int err;
- /* In case this is invoked with BRIDGE_FLAGS_SELF and port is
- * not bridged, then packets ingressing through the port with
- * the specified VIDs will be directed to CPU.
- */
if (!mlxsw_sp_port->bridged)
- return mlxsw_sp_port_add_vids(dev, vid_begin, vid_end);
-
- for (vid = vid_begin; vid <= vid_end; vid++) {
- if (!test_bit(vid, mlxsw_sp->active_fids)) {
- err = mlxsw_sp_fid_create(mlxsw_sp, vid);
- if (err) {
- netdev_err(dev, "Failed to create FID=%d\n",
- vid);
- return err;
- }
-
- /* When creating a FID, we set a VID to FID mapping
- * regardless of the port's mode.
- */
- mt = MLXSW_REG_SVFA_MT_VID_TO_FID;
- err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port, mt,
- true, vid, vid);
- if (err) {
- netdev_err(dev, "Failed to create FID=VID=%d mapping\n",
- vid);
- goto err_port_vid_to_fid_set;
- }
- }
- }
-
- /* Set FID mapping according to port's mode */
- for (vid = vid_begin; vid <= vid_end; vid++) {
- err = mlxsw_sp_port_fid_map(mlxsw_sp_port, vid);
- if (err) {
- netdev_err(dev, "Failed to map FID=%d", vid);
- last_visited_vid = --vid;
- goto err_port_fid_map;
- }
- }
+ return -EINVAL;
- err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
- true, false);
+ err = mlxsw_sp_port_fid_join(mlxsw_sp_port, vid_begin, vid_end);
if (err) {
- netdev_err(dev, "Failed to configure flooding\n");
- goto err_port_flood_set;
+ netdev_err(dev, "Failed to join FIDs\n");
+ return err;
}
err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
@@ -628,10 +690,6 @@ static int __mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
-err_port_vid_to_fid_set:
- mlxsw_sp_fid_destroy(mlxsw_sp, vid);
- return err;
-
err_port_stp_state_set:
for (vid = vid_begin; vid <= vid_end; vid++)
clear_bit(vid, mlxsw_sp_port->active_vlans);
@@ -641,13 +699,7 @@ err_port_pvid_set:
__mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end, false,
false);
err_port_vlans_set:
- __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end, false,
- false);
-err_port_flood_set:
- last_visited_vid = vid_end;
-err_port_fid_map:
- for (vid = last_visited_vid; vid >= vid_begin; vid--)
- mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
+ mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
return err;
}
@@ -678,9 +730,10 @@ static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
MLXSW_REG_SFD_OP_WRITE_REMOVE;
}
-static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- const char *mac, u16 fid, bool adding,
- bool dynamic)
+static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ const char *mac, u16 fid, bool adding,
+ enum mlxsw_reg_sfd_rec_action action,
+ bool dynamic)
{
char *sfd_pl;
int err;
@@ -691,14 +744,29 @@ static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
mlxsw_reg_sfd_uc_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
- mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
- local_port);
+ mac, fid, action, local_port);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
kfree(sfd_pl);
return err;
}
+static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ const char *mac, u16 fid, bool adding,
+ bool dynamic)
+{
+ return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
+ MLXSW_REG_SFD_REC_ACTION_NOP, dynamic);
+}
+
+int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
+ bool adding)
+{
+ return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
+ MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
+ false);
+}
+
static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
const char *mac, u16 fid, u16 lag_vid,
bool adding, bool dynamic)
@@ -903,6 +971,11 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
SWITCHDEV_OBJ_PORT_VLAN(obj),
trans);
break;
+ case SWITCHDEV_OBJ_ID_IPV4_FIB:
+ err = mlxsw_sp_router_fib4_add(mlxsw_sp_port,
+ SWITCHDEV_OBJ_IPV4_FIB(obj),
+ trans);
+ break;
case SWITCHDEV_OBJ_ID_PORT_FDB:
err = mlxsw_sp_port_fdb_static_add(mlxsw_sp_port,
SWITCHDEV_OBJ_PORT_FDB(obj),
@@ -921,34 +994,15 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
return err;
}
-static int mlxsw_sp_port_kill_vids(struct net_device *dev, u16 vid_begin,
- u16 vid_end)
-{
- u16 vid;
- int err;
-
- for (vid = vid_begin; vid <= vid_end; vid++) {
- err = mlxsw_sp_port_kill_vid(dev, 0, vid);
- if (err)
- return err;
- }
-
- return 0;
-}
-
static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid_begin, u16 vid_end, bool init)
+ u16 vid_begin, u16 vid_end)
{
struct net_device *dev = mlxsw_sp_port->dev;
u16 vid, pvid;
int err;
- /* In case this is invoked with BRIDGE_FLAGS_SELF and port is
- * not bridged, then prevent packets ingressing through the
- * port with the specified VIDs from being trapped to CPU.
- */
- if (!init && !mlxsw_sp_port->bridged)
- return mlxsw_sp_port_kill_vids(dev, vid_begin, vid_end);
+ if (!mlxsw_sp_port->bridged)
+ return -EINVAL;
err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
false, false);
@@ -958,9 +1012,6 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
return err;
}
- if (init)
- goto out;
-
pvid = mlxsw_sp_port->pvid;
if (pvid >= vid_begin && pvid <= vid_end) {
err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
@@ -970,23 +1021,8 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
}
}
- err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
- false, false);
- if (err) {
- netdev_err(dev, "Failed to clear flooding\n");
- return err;
- }
+ mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
- for (vid = vid_begin; vid <= vid_end; vid++) {
- /* Remove FID mapping in case of Virtual mode */
- err = mlxsw_sp_port_fid_unmap(mlxsw_sp_port, vid);
- if (err) {
- netdev_err(dev, "Failed to unmap FID=%d", vid);
- return err;
- }
- }
-
-out:
/* Changing activity bits only if HW operation succeeded */
for (vid = vid_begin; vid <= vid_end; vid++)
clear_bit(vid, mlxsw_sp_port->active_vlans);
@@ -997,8 +1033,8 @@ out:
static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_vlan *vlan)
{
- return __mlxsw_sp_port_vlans_del(mlxsw_sp_port,
- vlan->vid_begin, vlan->vid_end, false);
+ return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vlan->vid_begin,
+ vlan->vid_end);
}
void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
@@ -1006,7 +1042,7 @@ void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
u16 vid;
for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
- __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid, false);
+ __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid);
}
static int
@@ -1081,6 +1117,10 @@ static int mlxsw_sp_port_obj_del(struct net_device *dev,
err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
SWITCHDEV_OBJ_PORT_VLAN(obj));
break;
+ case SWITCHDEV_OBJ_ID_IPV4_FIB:
+ err = mlxsw_sp_router_fib4_del(mlxsw_sp_port,
+ SWITCHDEV_OBJ_IPV4_FIB(obj));
+ break;
case SWITCHDEV_OBJ_ID_PORT_FDB:
err = mlxsw_sp_port_fdb_static_del(mlxsw_sp_port,
SWITCHDEV_OBJ_PORT_FDB(obj));
@@ -1118,7 +1158,8 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_port *tmp;
- u16 vport_fid = 0;
+ struct mlxsw_sp_fid *f;
+ u16 vport_fid;
char *sfd_pl;
char mac[ETH_ALEN];
u16 fid;
@@ -1133,12 +1174,8 @@ static int mlxsw_sp_port_fdb_dump(struct mlxsw_sp_port *mlxsw_sp_port,
if (!sfd_pl)
return -ENOMEM;
- if (mlxsw_sp_port_is_vport(mlxsw_sp_port)) {
- u16 tmp;
-
- tmp = mlxsw_sp_vport_vfid_get(mlxsw_sp_port);
- vport_fid = mlxsw_sp_vfid_to_fid(tmp);
- }
+ f = mlxsw_sp_vport_fid_get(mlxsw_sp_port);
+ vport_fid = f ? f->fid : 0;
mlxsw_reg_sfd_pack(sfd_pl, MLXSW_REG_SFD_OP_QUERY_DUMP, 0);
do {
@@ -1310,11 +1347,10 @@ static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
}
if (mlxsw_sp_fid_is_vfid(fid)) {
- u16 vfid = mlxsw_sp_fid_to_vfid(fid);
struct mlxsw_sp_port *mlxsw_sp_vport;
- mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port,
- vfid);
+ mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
+ fid);
if (!mlxsw_sp_vport) {
netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
goto just_remove;
@@ -1370,11 +1406,10 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
}
if (mlxsw_sp_fid_is_vfid(fid)) {
- u16 vfid = mlxsw_sp_fid_to_vfid(fid);
struct mlxsw_sp_port *mlxsw_sp_vport;
- mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_vfid(mlxsw_sp_port,
- vfid);
+ mlxsw_sp_vport = mlxsw_sp_port_vport_find_by_fid(mlxsw_sp_port,
+ fid);
if (!mlxsw_sp_vport) {
netdev_err(mlxsw_sp_port->dev, "Failed to find a matching vPort following FDB notification\n");
goto just_remove;
@@ -1438,8 +1473,8 @@ static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
{
- schedule_delayed_work(&mlxsw_sp->fdb_notify.dw,
- msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
+ mlxsw_core_schedule_dw(&mlxsw_sp->fdb_notify.dw,
+ msecs_to_jiffies(mlxsw_sp->fdb_notify.interval));
}
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
@@ -1495,14 +1530,6 @@ static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
cancel_delayed_work_sync(&mlxsw_sp->fdb_notify.dw);
}
-static void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp)
-{
- u16 fid;
-
- for_each_set_bit(fid, mlxsw_sp->active_fids, VLAN_N_VID)
- mlxsw_sp_fid_destroy(mlxsw_sp, fid);
-}
-
int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
return mlxsw_sp_fdb_init(mlxsw_sp);
@@ -1511,33 +1538,6 @@ int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
mlxsw_sp_fdb_fini(mlxsw_sp);
- mlxsw_sp_fids_fini(mlxsw_sp);
-}
-
-int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port)
-{
- struct net_device *dev = mlxsw_sp_port->dev;
- int err;
-
- /* Allow only untagged packets to ingress and tag them internally
- * with VID 1.
- */
- mlxsw_sp_port->pvid = 1;
- err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID - 1,
- true);
- if (err) {
- netdev_err(dev, "Unable to init VLANs\n");
- return err;
- }
-
- /* Add implicit VLAN interface in the device, so that untagged
- * packets will be classified to the default vFID.
- */
- err = mlxsw_sp_port_add_vid(dev, 0, 1);
- if (err)
- netdev_err(dev, "Failed to configure default vFID\n");
-
- return err;
}
void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 7a60a26759b6..377daa4d509c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -43,7 +43,6 @@
#include <linux/device.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
-#include <net/devlink.h>
#include <net/switchdev.h>
#include <generated/utsrelease.h>
@@ -75,11 +74,11 @@ struct mlxsw_sx_port_pcpu_stats {
};
struct mlxsw_sx_port {
+ struct mlxsw_core_port core_port; /* must be first */
struct net_device *dev;
struct mlxsw_sx_port_pcpu_stats __percpu *pcpu_stats;
struct mlxsw_sx *mlxsw_sx;
u8 local_port;
- struct devlink_port devlink_port;
};
/* tx_hdr_version
@@ -303,7 +302,7 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
u64 len;
int err;
- if (mlxsw_core_skb_transmit_busy(mlxsw_sx, &tx_info))
+ if (mlxsw_core_skb_transmit_busy(mlxsw_sx->core, &tx_info))
return NETDEV_TX_BUSY;
if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
@@ -317,11 +316,14 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
}
}
mlxsw_sx_txhdr_construct(skb, &tx_info);
- len = skb->len;
+ /* The TX header is consumed by the HW on the way out, so we
+ * shouldn't count its bytes as being sent.
+ */
+ len = skb->len - MLXSW_TXHDR_LEN;
/* Due to a race we might fail here because of a full queue. In that
* unlikely case we simply drop the packet.
*/
- err = mlxsw_core_skb_transmit(mlxsw_sx, skb, &tx_info);
+ err = mlxsw_core_skb_transmit(mlxsw_sx->core, skb, &tx_info);
if (!err) {
pcpu_stats = this_cpu_ptr(mlxsw_sx_port->pcpu_stats);
@@ -518,7 +520,8 @@ static void mlxsw_sx_port_get_stats(struct net_device *dev,
int i;
int err;
- mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sx_port->local_port);
+ mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sx_port->local_port,
+ MLXSW_REG_PPCNT_IEEE_8023_CNT, 0);
err = mlxsw_reg_query(mlxsw_sx->core, MLXSW_REG(ppcnt), ppcnt_pl);
for (i = 0; i < MLXSW_SX_PORT_HW_STATS_LEN; i++)
data[i] = !err ? mlxsw_sx_port_hw_stats[i].getter(ppcnt_pl) : 0;
@@ -955,9 +958,7 @@ mlxsw_sx_port_mac_learning_mode_set(struct mlxsw_sx_port *mlxsw_sx_port,
static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
- struct devlink *devlink = priv_to_devlink(mlxsw_sx->core);
struct mlxsw_sx_port *mlxsw_sx_port;
- struct devlink_port *devlink_port;
struct net_device *dev;
bool usable;
int err;
@@ -1011,14 +1012,6 @@ static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
goto port_not_usable;
}
- devlink_port = &mlxsw_sx_port->devlink_port;
- err = devlink_port_register(devlink, devlink_port, local_port);
- if (err) {
- dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to register devlink port\n",
- mlxsw_sx_port->local_port);
- goto err_devlink_port_register;
- }
-
err = mlxsw_sx_port_system_port_mapping_set(mlxsw_sx_port);
if (err) {
dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to set system port mapping\n",
@@ -1076,11 +1069,19 @@ static int mlxsw_sx_port_create(struct mlxsw_sx *mlxsw_sx, u8 local_port)
goto err_register_netdev;
}
- devlink_port_type_eth_set(devlink_port, dev);
+ err = mlxsw_core_port_init(mlxsw_sx->core, &mlxsw_sx_port->core_port,
+ mlxsw_sx_port->local_port, dev, false, 0);
+ if (err) {
+ dev_err(mlxsw_sx->bus_info->dev, "Port %d: Failed to init core port\n",
+ mlxsw_sx_port->local_port);
+ goto err_core_port_init;
+ }
mlxsw_sx->ports[local_port] = mlxsw_sx_port;
return 0;
+err_core_port_init:
+ unregister_netdev(dev);
err_register_netdev:
err_port_mac_learning_mode_set:
err_port_stp_state_set:
@@ -1089,8 +1090,6 @@ err_port_mtu_set:
err_port_speed_set:
err_port_swid_set:
err_port_system_port_mapping_set:
- devlink_port_unregister(&mlxsw_sx_port->devlink_port);
-err_devlink_port_register:
port_not_usable:
err_port_module_check:
err_dev_addr_get:
@@ -1103,15 +1102,12 @@ err_alloc_stats:
static void mlxsw_sx_port_remove(struct mlxsw_sx *mlxsw_sx, u8 local_port)
{
struct mlxsw_sx_port *mlxsw_sx_port = mlxsw_sx->ports[local_port];
- struct devlink_port *devlink_port;
if (!mlxsw_sx_port)
return;
- devlink_port = &mlxsw_sx_port->devlink_port;
- devlink_port_type_clear(devlink_port);
+ mlxsw_core_port_fini(&mlxsw_sx_port->core_port);
unregister_netdev(mlxsw_sx_port->dev); /* This calls ndo_stop */
mlxsw_sx_port_swid_set(mlxsw_sx_port, MLXSW_PORT_SWID_DISABLED_PORT);
- devlink_port_unregister(devlink_port);
free_percpu(mlxsw_sx_port->pcpu_stats);
free_netdev(mlxsw_sx_port->dev);
}
@@ -1454,10 +1450,10 @@ static int mlxsw_sx_flood_init(struct mlxsw_sx *mlxsw_sx)
return mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(sgcr), sgcr_pl);
}
-static int mlxsw_sx_init(void *priv, struct mlxsw_core *mlxsw_core,
+static int mlxsw_sx_init(struct mlxsw_core *mlxsw_core,
const struct mlxsw_bus_info *mlxsw_bus_info)
{
- struct mlxsw_sx *mlxsw_sx = priv;
+ struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);
int err;
mlxsw_sx->core = mlxsw_core;
@@ -1504,9 +1500,9 @@ err_event_register:
return err;
}
-static void mlxsw_sx_fini(void *priv)
+static void mlxsw_sx_fini(struct mlxsw_core *mlxsw_core)
{
- struct mlxsw_sx *mlxsw_sx = priv;
+ struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core);
mlxsw_sx_traps_fini(mlxsw_sx);
mlxsw_sx_event_unregister(mlxsw_sx, MLXSW_TRAP_ID_PUDE);
@@ -1545,6 +1541,7 @@ static struct mlxsw_config_profile mlxsw_sx_config_profile = {
.type = MLXSW_PORT_SWID_TYPE_ETH,
}
},
+ .resource_query_enable = 0,
};
static struct mlxsw_driver mlxsw_sx_driver = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 53a9550be75e..ed8e30186400 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -54,6 +54,15 @@ enum {
MLXSW_TRAP_ID_IGMP_V2_REPORT = 0x32,
MLXSW_TRAP_ID_IGMP_V2_LEAVE = 0x33,
MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34,
+ MLXSW_TRAP_ID_ARPBC = 0x50,
+ MLXSW_TRAP_ID_ARPUC = 0x51,
+ MLXSW_TRAP_ID_MTUERROR = 0x52,
+ MLXSW_TRAP_ID_TTLERROR = 0x53,
+ MLXSW_TRAP_ID_LBERROR = 0x54,
+ MLXSW_TRAP_ID_OSPF = 0x55,
+ MLXSW_TRAP_ID_IP2ME = 0x5F,
+ MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70,
+ MLXSW_TRAP_ID_HOST_MISS_IPV4 = 0x90,
MLXSW_TRAP_ID_MAX = 0x1FF
};
diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c
index a8522d8af95d..20cb85bc0c5f 100644
--- a/drivers/net/ethernet/micrel/ks8695net.c
+++ b/drivers/net/ethernet/micrel/ks8695net.c
@@ -1354,6 +1354,7 @@ ks8695_probe(struct platform_device *pdev)
struct resource *rxirq_res, *txirq_res, *linkirq_res;
int ret = 0;
int buff_n;
+ bool inv_mac_addr = false;
u32 machigh, maclow;
/* Initialise a net_device */
@@ -1456,8 +1457,7 @@ ks8695_probe(struct platform_device *pdev)
ndev->dev_addr[5] = maclow & 0xFF;
if (!is_valid_ether_addr(ndev->dev_addr))
- dev_warn(ksp->dev, "%s: Invalid ethernet MAC address. Please "
- "set using ifconfig\n", ndev->name);
+ inv_mac_addr = true;
/* In order to be efficient memory-wise, we allocate both
* rings in one go.
@@ -1520,6 +1520,9 @@ ks8695_probe(struct platform_device *pdev)
ret = register_netdev(ndev);
if (ret == 0) {
+ if (inv_mac_addr)
+ dev_warn(ksp->dev, "%s: Invalid ethernet MAC address. Please set using ip\n",
+ ndev->name);
dev_info(ksp->dev, "ks8695 ethernet (%s) MAC: %pM\n",
ks8695_port_type(ksp), ndev->dev_addr);
} else {
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 75dc46c5fca2..280e761d3a97 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -4790,7 +4790,7 @@ static void transmit_cleanup(struct dev_info *hw_priv, int normal)
/* Notify the network subsystem that the packet has been sent. */
if (dev)
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
}
/**
@@ -4965,7 +4965,7 @@ static void netdev_tx_timeout(struct net_device *dev)
hw_ena_intr(hw);
}
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
netif_wake_queue(dev);
}
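
Editor's note: this hunk is one instance of a tree-wide conversion away from
writing dev->trans_start directly, since the watchdog timestamp actually
lives per TX queue. Conceptually the replacement helper behaves roughly like
the hypothetical paraphrase below (the real netif_trans_update() lives in
include/linux/netdevice.h; this is not a verbatim copy):

#include <linux/netdevice.h>

static inline void my_trans_update(struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	/* Only write when the value changes, to avoid dirtying the
	 * cache line on every transmitted packet. */
	if (txq->trans_start != jiffies)
		txq->trans_start = jiffies;
}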
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index 86ea17e7ba7b..0a26b11ca8f6 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -28,11 +28,12 @@
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/spi/spi.h>
+#include <linux/of_net.h>
#include "enc28j60_hw.h"
#define DRV_NAME "enc28j60"
-#define DRV_VERSION "1.01"
+#define DRV_VERSION "1.02"
#define SPI_OPLEN 1
@@ -89,22 +90,26 @@ spi_read_buf(struct enc28j60_net *priv, int len, u8 *data)
{
u8 *rx_buf = priv->spi_transfer_buf + 4;
u8 *tx_buf = priv->spi_transfer_buf;
- struct spi_transfer t = {
+ struct spi_transfer tx = {
.tx_buf = tx_buf,
+ .len = SPI_OPLEN,
+ };
+ struct spi_transfer rx = {
.rx_buf = rx_buf,
- .len = SPI_OPLEN + len,
+ .len = len,
};
struct spi_message msg;
int ret;
tx_buf[0] = ENC28J60_READ_BUF_MEM;
- tx_buf[1] = tx_buf[2] = tx_buf[3] = 0; /* don't care */
spi_message_init(&msg);
- spi_message_add_tail(&t, &msg);
+ spi_message_add_tail(&tx, &msg);
+ spi_message_add_tail(&rx, &msg);
+
ret = spi_sync(priv->spi, &msg);
if (ret == 0) {
- memcpy(data, &rx_buf[SPI_OPLEN], len);
+ memcpy(data, rx_buf, len);
ret = msg.status;
}
if (ret && netif_msg_drv(priv))
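
Editor's note: the rewrite of spi_read_buf() above replaces one combined
transfer with a two-transfer message: a 1-byte TX-only transfer for the
opcode, then an RX-only transfer landing directly in the receive buffer, so
no dummy bytes are clocked into memory that later has to be copied around.
The general idiom, as a hypothetical helper rather than the driver's exact
code (note that buffers handed to the SPI core must be DMA-safe, which is
why enc28j60 keeps them in its heap-allocated spi_transfer_buf):

#include <linux/spi/spi.h>

static int spi_cmd_read(struct spi_device *spi, u8 *cmd, u8 *buf, size_t len)
{
	struct spi_transfer xfers[2] = {
		{ .tx_buf = cmd, .len = 1 },	/* opcode, TX only */
		{ .rx_buf = buf, .len = len },	/* payload, RX only */
	};
	struct spi_message msg;

	spi_message_init(&msg);
	spi_message_add_tail(&xfers[0], &msg);
	spi_message_add_tail(&xfers[1], &msg);
	return spi_sync(spi, &msg);	/* CS stays asserted across both */
}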
@@ -1146,7 +1151,8 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
enc28j60_phy_read(priv, PHIR);
}
/* TX complete handler */
- if ((intflags & EIR_TXIF) != 0) {
+ if (((intflags & EIR_TXIF) != 0) &&
+ ((intflags & EIR_TXERIF) == 0)) {
bool err = false;
loop++;
if (netif_msg_intr(priv))
@@ -1198,7 +1204,7 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
enc28j60_tx_clear(ndev, true);
} else
enc28j60_tx_clear(ndev, true);
- locked_reg_bfclr(priv, EIR, EIR_TXERIF);
+ locked_reg_bfclr(priv, EIR, EIR_TXERIF | EIR_TXIF);
}
/* RX Error handler */
if ((intflags & EIR_RXERIF) != 0) {
@@ -1233,6 +1239,8 @@ static void enc28j60_irq_work_handler(struct work_struct *work)
*/
static void enc28j60_hw_tx(struct enc28j60_net *priv)
{
+ BUG_ON(!priv->tx_skb);
+
if (netif_msg_tx_queued(priv))
printk(KERN_DEBUG DRV_NAME
": Tx Packet Len:%d\n", priv->tx_skb->len);
@@ -1544,6 +1552,7 @@ static int enc28j60_probe(struct spi_device *spi)
{
struct net_device *dev;
struct enc28j60_net *priv;
+ const void *macaddr;
int ret = 0;
if (netif_msg_drv(&debug))
@@ -1575,7 +1584,12 @@ static int enc28j60_probe(struct spi_device *spi)
ret = -EIO;
goto error_irq;
}
- eth_hw_addr_random(dev);
+
+ macaddr = of_get_mac_address(spi->dev.of_node);
+ if (macaddr)
+ ether_addr_copy(dev->dev_addr, macaddr);
+ else
+ eth_hw_addr_random(dev);
enc28j60_set_hw_macaddr(dev);
/* Board setup must set the relevant edge trigger type;
@@ -1630,9 +1644,16 @@ static int enc28j60_remove(struct spi_device *spi)
return 0;
}
+static const struct of_device_id enc28j60_dt_ids[] = {
+ { .compatible = "microchip,enc28j60" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, enc28j60_dt_ids);
+
static struct spi_driver enc28j60_driver = {
.driver = {
- .name = DRV_NAME,
+ .name = DRV_NAME,
+ .of_match_table = enc28j60_dt_ids,
},
.probe = enc28j60_probe,
.remove = enc28j60_remove,
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index 7df318346b05..42e34076d2de 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -874,7 +874,7 @@ static netdev_tx_t encx24j600_tx(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev);
/* save the timestamp */
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
/* Remember the skb for deferred processing */
priv->tx_skb = skb;
@@ -890,7 +890,7 @@ static void encx24j600_tx_timeout(struct net_device *dev)
struct encx24j600_priv *priv = netdev_priv(dev);
netif_err(priv, tx_err, dev, "TX timeout at %ld, latency %ld\n",
- jiffies, jiffies - dev->trans_start);
+ jiffies, jiffies - dev_trans_start(dev));
dev->stats.tx_errors++;
netif_wake_queue(dev);
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 3e67f451f2ab..4367dd6879a2 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -376,7 +376,7 @@ static int moxart_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
priv->tx_head = TX_NEXT(tx_head);
- ndev->trans_start = jiffies;
+ netif_trans_update(ndev);
ret = NETDEV_TX_OK;
out_unlock:
spin_unlock_irq(&priv->txlock);
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 122c2ee3dfe2..ed89029ff75b 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -1904,7 +1904,7 @@ static void ns_tx_timeout(struct net_device *dev)
spin_unlock_irq(&np->lock);
enable_irq(irq);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
dev->stats.tx_errors++;
netif_wake_queue(dev);
}
diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
index 1bd419dbda6d..612c7a44b26c 100644
--- a/drivers/net/ethernet/natsemi/sonic.c
+++ b/drivers/net/ethernet/natsemi/sonic.c
@@ -174,7 +174,7 @@ static void sonic_tx_timeout(struct net_device *dev)
/* Try to restart the adaptor. */
sonic_init(dev);
lp->stats.tx_errors++;
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 9ba975853ec6..eaa37c079a7c 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -4021,7 +4021,6 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned long flags = 0;
u16 vlan_tag = 0;
struct fifo_info *fifo = NULL;
- int do_spin_lock = 1;
int offload_type;
int enable_per_list_interrupt = 0;
struct config_param *config = &sp->config;
@@ -4074,7 +4073,6 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
queue += sp->udp_fifo_idx;
if (skb->len > 1024)
enable_per_list_interrupt = 1;
- do_spin_lock = 0;
}
}
}
@@ -4084,12 +4082,7 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
[skb->priority & (MAX_TX_FIFOS - 1)];
fifo = &mac_control->fifos[queue];
- if (do_spin_lock)
- spin_lock_irqsave(&fifo->tx_lock, flags);
- else {
- if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
- return NETDEV_TX_LOCKED;
- }
+ spin_lock_irqsave(&fifo->tx_lock, flags);
if (sp->config.multiq) {
if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
@@ -7419,7 +7412,7 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) &&
((!ring_data->lro) ||
- (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) &&
+ (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG))) &&
(dev->features & NETIF_F_RXCSUM)) {
l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index 75683fb26734..690635660195 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -59,11 +59,11 @@
netdev_warn((nn)->netdev, fmt, ## args); \
} while (0)
-/* Max time to wait for NFP to respond on updates (in ms) */
-#define NFP_NET_POLL_TIMEOUT 5000
+/* Max time to wait for NFP to respond on updates (in seconds) */
+#define NFP_NET_POLL_TIMEOUT 5
/* Bar allocation */
-#define NFP_NET_CRTL_BAR 0
+#define NFP_NET_CTRL_BAR 0
#define NFP_NET_Q0_BAR 2
#define NFP_NET_Q1_BAR 4 /* OBSOLETE */
@@ -298,6 +298,8 @@ struct nfp_net_rx_buf {
* @rxds: Virtual address of FL/RX ring in host memory
* @dma: DMA address of the FL/RX ring
* @size: Size, in bytes, of the FL/RX ring (needed to free)
+ * @bufsz: Buffer allocation size for convenience of management routines
+ * (NOTE: this is in the second cache line, do not use on the fast path!)
*/
struct nfp_net_rx_ring {
struct nfp_net_r_vector *r_vec;
@@ -319,6 +321,7 @@ struct nfp_net_rx_ring {
dma_addr_t dma;
unsigned int size;
+ unsigned int bufsz;
} ____cacheline_aligned;
/**
@@ -444,6 +447,10 @@ static inline bool nfp_net_fw_ver_eq(struct nfp_net_fw_version *fw_ver,
* @shared_name: Name for shared interrupt
* @me_freq_mhz: ME clock_freq (MHz)
* @reconfig_lock: Protects HW reconfiguration request regs/machinery
+ * @reconfig_posted: Pending reconfig bits coming from async sources
+ * @reconfig_timer_active: Timer for reading reconfiguration results is pending
+ * @reconfig_sync_present: Some thread is performing synchronous reconfig
+ * @reconfig_timer: Timer for async reading of reconfig results
* @link_up: Is the link up?
* @link_status_lock: Protects @link_up and ensures atomicity with BAR reading
* @rx_coalesce_usecs: RX interrupt moderation usecs delay parameter
@@ -472,6 +479,9 @@ struct nfp_net {
u32 rx_offset;
+ struct nfp_net_tx_ring *tx_rings;
+ struct nfp_net_rx_ring *rx_rings;
+
#ifdef CONFIG_PCI_IOV
unsigned int num_vfs;
struct vf_data_storage *vfinfo;
@@ -504,9 +514,6 @@ struct nfp_net {
int txd_cnt;
int rxd_cnt;
- struct nfp_net_tx_ring tx_rings[NFP_NET_MAX_TX_RINGS];
- struct nfp_net_rx_ring rx_rings[NFP_NET_MAX_RX_RINGS];
-
u8 num_irqs;
u8 num_r_vecs;
struct nfp_net_r_vector r_vecs[NFP_NET_MAX_TX_RINGS];
@@ -528,6 +535,10 @@ struct nfp_net {
spinlock_t link_status_lock;
spinlock_t reconfig_lock;
+ u32 reconfig_posted;
+ bool reconfig_timer_active;
+ bool reconfig_sync_present;
+ struct timer_list reconfig_timer;
u32 rx_coalesce_usecs;
u32 rx_coalesce_max_frames;
@@ -721,6 +732,7 @@ void nfp_net_rss_write_key(struct nfp_net *nn);
void nfp_net_coalesce_write_cfg(struct nfp_net *nn);
int nfp_net_irqs_alloc(struct nfp_net *nn);
void nfp_net_irqs_disable(struct nfp_net *nn);
+int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt);
#ifdef CONFIG_NFP_NET_DEBUG
void nfp_net_debugfs_create(void);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 43c618bafdb6..39dadfca84ef 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -41,7 +41,6 @@
* Chris Telfer <chris.telfer@netronome.com>
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -80,6 +79,116 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
put_unaligned_le32(reg, fw_ver);
}
+/* Firmware reconfig
+ *
+ * Firmware reconfig may take a while so we have two versions of it -
+ * synchronous and asynchronous (posted). All synchronous callers are holding
+ * RTNL so we don't have to worry about serializing them.
+ */
+static void nfp_net_reconfig_start(struct nfp_net *nn, u32 update)
+{
+ nn_writel(nn, NFP_NET_CFG_UPDATE, update);
+ /* ensure update is written before pinging HW */
+ nn_pci_flush(nn);
+ nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1);
+}
+
+/* Pass 0 as update to run posted reconfigs. */
+static void nfp_net_reconfig_start_async(struct nfp_net *nn, u32 update)
+{
+ update |= nn->reconfig_posted;
+ nn->reconfig_posted = 0;
+
+ nfp_net_reconfig_start(nn, update);
+
+ nn->reconfig_timer_active = true;
+ mod_timer(&nn->reconfig_timer, jiffies + NFP_NET_POLL_TIMEOUT * HZ);
+}
+
+static bool nfp_net_reconfig_check_done(struct nfp_net *nn, bool last_check)
+{
+ u32 reg;
+
+ reg = nn_readl(nn, NFP_NET_CFG_UPDATE);
+ if (reg == 0)
+ return true;
+ if (reg & NFP_NET_CFG_UPDATE_ERR) {
+ nn_err(nn, "Reconfig error: 0x%08x\n", reg);
+ return true;
+ } else if (last_check) {
+ nn_err(nn, "Reconfig timeout: 0x%08x\n", reg);
+ return true;
+ }
+
+ return false;
+}
+
+static int nfp_net_reconfig_wait(struct nfp_net *nn, unsigned long deadline)
+{
+ bool timed_out = false;
+
+ /* Poll update field, waiting for NFP to ack the config */
+ while (!nfp_net_reconfig_check_done(nn, timed_out)) {
+ msleep(1);
+ timed_out = time_is_before_eq_jiffies(deadline);
+ }
+
+ if (nn_readl(nn, NFP_NET_CFG_UPDATE) & NFP_NET_CFG_UPDATE_ERR)
+ return -EIO;
+
+ return timed_out ? -EIO : 0;
+}
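
Editor's note: nfp_net_reconfig_wait() uses a deadline-polling shape worth
calling out: the timeout flag is sampled after each check, so once the
deadline passes the loop body runs exactly once more with last_check set,
guaranteeing a final status read and error report rather than giving up
between polls. The same shape in a standalone sketch:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static int polls;

static bool check_done(bool last_check)
{
	if (polls++ > 1000)
		return true;		/* pretend the HW finally acked */
	if (last_check) {
		puts("timeout");	/* one final read still happens */
		return true;
	}
	return false;
}

static int wait_deadline(time_t deadline)
{
	bool timed_out = false;

	/* The flag is sampled *after* each poll, so once the deadline
	 * passes the loop makes exactly one last check instead of
	 * giving up between reads. */
	while (!check_done(timed_out))
		timed_out = time(NULL) > deadline;	/* msleep(1) in the driver */

	return timed_out ? -1 : 0;
}

int main(void)
{
	return wait_deadline(time(NULL) + 5) ? 1 : 0;
}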
+
+static void nfp_net_reconfig_timer(unsigned long data)
+{
+ struct nfp_net *nn = (void *)data;
+
+ spin_lock_bh(&nn->reconfig_lock);
+
+ nn->reconfig_timer_active = false;
+
+ /* If sync caller is present it will take over from us */
+ if (nn->reconfig_sync_present)
+ goto done;
+
+ /* Read reconfig status and report errors */
+ nfp_net_reconfig_check_done(nn, true);
+
+ if (nn->reconfig_posted)
+ nfp_net_reconfig_start_async(nn, 0);
+done:
+ spin_unlock_bh(&nn->reconfig_lock);
+}
+
+/**
+ * nfp_net_reconfig_post() - Post async reconfig request
+ * @nn: NFP Net device to reconfigure
+ * @update: The value for the update field in the BAR config
+ *
+ * Record an FW reconfiguration request. Reconfiguration will be kicked off
+ * whenever the reconfiguration machinery is idle. Multiple requests can be
+ * merged together!
+ */
+static void nfp_net_reconfig_post(struct nfp_net *nn, u32 update)
+{
+ spin_lock_bh(&nn->reconfig_lock);
+
+ /* Sync caller will kick off async reconf when it's done, just post */
+ if (nn->reconfig_sync_present) {
+ nn->reconfig_posted |= update;
+ goto done;
+ }
+
+ /* Opportunistically check if the previous command is done */
+ if (!nn->reconfig_timer_active ||
+ nfp_net_reconfig_check_done(nn, false))
+ nfp_net_reconfig_start_async(nn, update);
+ else
+ nn->reconfig_posted |= update;
+done:
+ spin_unlock_bh(&nn->reconfig_lock);
+}
+
/**
* nfp_net_reconfig() - Reconfigure the firmware
* @nn: NFP Net device to reconfigure
@@ -93,35 +202,45 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver,
*/
int nfp_net_reconfig(struct nfp_net *nn, u32 update)
{
- int cnt, ret = 0;
- u32 new;
+ bool cancelled_timer = false;
+ u32 pre_posted_requests;
+ int ret;
spin_lock_bh(&nn->reconfig_lock);
- nn_writel(nn, NFP_NET_CFG_UPDATE, update);
- /* ensure update is written before pinging HW */
- nn_pci_flush(nn);
- nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1);
+ nn->reconfig_sync_present = true;
- /* Poll update field, waiting for NFP to ack the config */
- for (cnt = 0; ; cnt++) {
- new = nn_readl(nn, NFP_NET_CFG_UPDATE);
- if (new == 0)
- break;
- if (new & NFP_NET_CFG_UPDATE_ERR) {
- nn_err(nn, "Reconfig error: 0x%08x\n", new);
- ret = -EIO;
- break;
- } else if (cnt >= NFP_NET_POLL_TIMEOUT) {
- nn_err(nn, "Reconfig timeout for 0x%08x after %dms\n",
- update, cnt);
- ret = -EIO;
- break;
- }
- mdelay(1);
+ if (nn->reconfig_timer_active) {
+ del_timer(&nn->reconfig_timer);
+ nn->reconfig_timer_active = false;
+ cancelled_timer = true;
}
+ pre_posted_requests = nn->reconfig_posted;
+ nn->reconfig_posted = 0;
spin_unlock_bh(&nn->reconfig_lock);
+
+ if (cancelled_timer)
+ nfp_net_reconfig_wait(nn, nn->reconfig_timer.expires);
+
+ /* Run the posted reconfigs which were issued before we started */
+ if (pre_posted_requests) {
+ nfp_net_reconfig_start(nn, pre_posted_requests);
+ nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
+ }
+
+ nfp_net_reconfig_start(nn, update);
+ ret = nfp_net_reconfig_wait(nn, jiffies + HZ * NFP_NET_POLL_TIMEOUT);
+
+ spin_lock_bh(&nn->reconfig_lock);
+
+ if (nn->reconfig_posted)
+ nfp_net_reconfig_start_async(nn, 0);
+
+ nn->reconfig_sync_present = false;
+
+ spin_unlock_bh(&nn->reconfig_lock);
+
return ret;
}
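
Editor's note: the rewritten nfp_net_reconfig() coordinates with the posted
path in three steps: take over any in-flight async request (cancel the timer
and wait for it), flush update bits that were posted before the sync caller
arrived, then run its own update and finally restart the async machinery for
anything posted meanwhile. A lock-free toy model of that sequence (the
driver serializes these sections with nn->reconfig_lock):

#include <stdbool.h>
#include <stdio.h>

typedef unsigned int u32;

static bool sync_present, timer_active;
static u32 posted;

static void run_and_wait(u32 update)
{
	printf("reconfig 0x%x\n", update);
}

static int reconfig_sync(u32 update)
{
	u32 pre;

	sync_present = true;
	if (timer_active)		/* take over an in-flight async op */
		timer_active = false;
	pre = posted;			/* adopt bits posted before we ran */
	posted = 0;

	if (pre)
		run_and_wait(pre);	/* flush older posted requests first */
	run_and_wait(update);		/* then our own update */

	if (posted) {			/* bits posted while we were busy */
		run_and_wait(posted);	/* restarted async in the real code */
		posted = 0;
	}
	sync_present = false;
	return 0;
}

int main(void)
{
	posted = 0x3;			/* pretend two async bits are pending */
	return reconfig_sync(0x10);
}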
@@ -347,12 +466,18 @@ static irqreturn_t nfp_net_irq_exn(int irq, void *data)
/**
* nfp_net_tx_ring_init() - Fill in the boilerplate for a TX ring
* @tx_ring: TX ring structure
+ * @r_vec: IRQ vector servicing this ring
+ * @idx: Ring index
*/
-static void nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring)
+static void
+nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring,
+ struct nfp_net_r_vector *r_vec, unsigned int idx)
{
- struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
struct nfp_net *nn = r_vec->nfp_net;
+ tx_ring->idx = idx;
+ tx_ring->r_vec = r_vec;
+
tx_ring->qcidx = tx_ring->idx * nn->stride_tx;
tx_ring->qcp_q = nn->tx_bar + NFP_QCP_QUEUE_OFF(tx_ring->qcidx);
}
@@ -360,12 +485,18 @@ static void nfp_net_tx_ring_init(struct nfp_net_tx_ring *tx_ring)
/**
* nfp_net_rx_ring_init() - Fill in the boilerplate for a RX ring
* @rx_ring: RX ring structure
+ * @r_vec: IRQ vector servicing this ring
+ * @idx: Ring index
*/
-static void nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring)
+static void
+nfp_net_rx_ring_init(struct nfp_net_rx_ring *rx_ring,
+ struct nfp_net_r_vector *r_vec, unsigned int idx)
{
- struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
struct nfp_net *nn = r_vec->nfp_net;
+ rx_ring->idx = idx;
+ rx_ring->r_vec = r_vec;
+
rx_ring->fl_qcidx = rx_ring->idx * nn->stride_rx;
rx_ring->rx_qcidx = rx_ring->fl_qcidx + (nn->stride_rx - 1);
@@ -401,16 +532,6 @@ static void nfp_net_irqs_assign(struct net_device *netdev)
r_vec->irq_idx = NFP_NET_NON_Q_VECTORS + r;
cpumask_set_cpu(r, &r_vec->affinity_mask);
-
- r_vec->tx_ring = &nn->tx_rings[r];
- nn->tx_rings[r].idx = r;
- nn->tx_rings[r].r_vec = r_vec;
- nfp_net_tx_ring_init(r_vec->tx_ring);
-
- r_vec->rx_ring = &nn->rx_rings[r];
- nn->rx_rings[r].idx = r;
- nn->rx_rings[r].r_vec = r_vec;
- nfp_net_rx_ring_init(r_vec->rx_ring);
}
}
@@ -865,61 +986,59 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
}
/**
- * nfp_net_tx_flush() - Free any untransmitted buffers currently on the TX ring
- * @tx_ring: TX ring structure
+ * nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers
+ * @nn: NFP Net device
+ * @tx_ring: TX ring structure
*
* Assumes that the device is stopped
*/
-static void nfp_net_tx_flush(struct nfp_net_tx_ring *tx_ring)
+static void
+nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring)
{
- struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
- struct nfp_net *nn = r_vec->nfp_net;
- struct pci_dev *pdev = nn->pdev;
const struct skb_frag_struct *frag;
struct netdev_queue *nd_q;
- struct sk_buff *skb;
- int nr_frags;
- int fidx;
- int idx;
+ struct pci_dev *pdev = nn->pdev;
while (tx_ring->rd_p != tx_ring->wr_p) {
- idx = tx_ring->rd_p % tx_ring->cnt;
+ int nr_frags, fidx, idx;
+ struct sk_buff *skb;
+ idx = tx_ring->rd_p % tx_ring->cnt;
skb = tx_ring->txbufs[idx].skb;
- if (skb) {
- nr_frags = skb_shinfo(skb)->nr_frags;
- fidx = tx_ring->txbufs[idx].fidx;
-
- if (fidx == -1) {
- /* unmap head */
- dma_unmap_single(&pdev->dev,
- tx_ring->txbufs[idx].dma_addr,
- skb_headlen(skb),
- DMA_TO_DEVICE);
- } else {
- /* unmap fragment */
- frag = &skb_shinfo(skb)->frags[fidx];
- dma_unmap_page(&pdev->dev,
- tx_ring->txbufs[idx].dma_addr,
- skb_frag_size(frag),
- DMA_TO_DEVICE);
- }
-
- /* check for last gather fragment */
- if (fidx == nr_frags - 1)
- dev_kfree_skb_any(skb);
-
- tx_ring->txbufs[idx].dma_addr = 0;
- tx_ring->txbufs[idx].skb = NULL;
- tx_ring->txbufs[idx].fidx = -2;
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ fidx = tx_ring->txbufs[idx].fidx;
+
+ if (fidx == -1) {
+ /* unmap head */
+ dma_unmap_single(&pdev->dev,
+ tx_ring->txbufs[idx].dma_addr,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ } else {
+ /* unmap fragment */
+ frag = &skb_shinfo(skb)->frags[fidx];
+ dma_unmap_page(&pdev->dev,
+ tx_ring->txbufs[idx].dma_addr,
+ skb_frag_size(frag), DMA_TO_DEVICE);
}
- memset(&tx_ring->txds[idx], 0, sizeof(tx_ring->txds[idx]));
+ /* check for last gather fragment */
+ if (fidx == nr_frags - 1)
+ dev_kfree_skb_any(skb);
+
+ tx_ring->txbufs[idx].dma_addr = 0;
+ tx_ring->txbufs[idx].skb = NULL;
+ tx_ring->txbufs[idx].fidx = -2;
tx_ring->qcp_rd_p++;
tx_ring->rd_p++;
}
+ memset(tx_ring->txds, 0, sizeof(*tx_ring->txds) * tx_ring->cnt);
+ tx_ring->wr_p = 0;
+ tx_ring->rd_p = 0;
+ tx_ring->qcp_rd_p = 0;
+ tx_ring->wr_ptr_add = 0;
+
nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx);
netdev_tx_reset_queue(nd_q);
}
@@ -957,25 +1076,27 @@ static inline int nfp_net_rx_space(struct nfp_net_rx_ring *rx_ring)
* nfp_net_rx_alloc_one() - Allocate and map skb for RX
* @rx_ring: RX ring structure of the skb
* @dma_addr: Pointer to storage for DMA address (output param)
+ * @fl_bufsz: size of freelist buffers
*
* This function will allocate a new skb and map it for DMA.
*
* Return: allocated skb or NULL on failure.
*/
static struct sk_buff *
-nfp_net_rx_alloc_one(struct nfp_net_rx_ring *rx_ring, dma_addr_t *dma_addr)
+nfp_net_rx_alloc_one(struct nfp_net_rx_ring *rx_ring, dma_addr_t *dma_addr,
+ unsigned int fl_bufsz)
{
struct nfp_net *nn = rx_ring->r_vec->nfp_net;
struct sk_buff *skb;
- skb = netdev_alloc_skb(nn->netdev, nn->fl_bufsz);
+ skb = netdev_alloc_skb(nn->netdev, fl_bufsz);
if (!skb) {
nn_warn_ratelimit(nn, "Failed to alloc receive SKB\n");
return NULL;
}
*dma_addr = dma_map_single(&nn->pdev->dev, skb->data,
- nn->fl_bufsz, DMA_FROM_DEVICE);
+ fl_bufsz, DMA_FROM_DEVICE);
if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) {
dev_kfree_skb_any(skb);
nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n");
@@ -1020,62 +1141,101 @@ static void nfp_net_rx_give_one(struct nfp_net_rx_ring *rx_ring,
}
/**
- * nfp_net_rx_flush() - Free any buffers currently on the RX ring
- * @rx_ring: RX ring to remove buffers from
+ * nfp_net_rx_ring_reset() - Reflect in SW the state of the freelist after disable
+ * @rx_ring: RX ring structure
*
- * Assumes that the device is stopped
+ * Warning: Do *not* call if ring buffers were never put on the FW freelist
+ * (i.e. device was not enabled)!
*/
-static void nfp_net_rx_flush(struct nfp_net_rx_ring *rx_ring)
+static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
{
- struct nfp_net *nn = rx_ring->r_vec->nfp_net;
- struct pci_dev *pdev = nn->pdev;
- int idx;
+ unsigned int wr_idx, last_idx;
- while (rx_ring->rd_p != rx_ring->wr_p) {
- idx = rx_ring->rd_p % rx_ring->cnt;
+ /* Move the empty entry to the end of the list */
+ wr_idx = rx_ring->wr_p % rx_ring->cnt;
+ last_idx = rx_ring->cnt - 1;
+ rx_ring->rxbufs[wr_idx].dma_addr = rx_ring->rxbufs[last_idx].dma_addr;
+ rx_ring->rxbufs[wr_idx].skb = rx_ring->rxbufs[last_idx].skb;
+ rx_ring->rxbufs[last_idx].dma_addr = 0;
+ rx_ring->rxbufs[last_idx].skb = NULL;
- if (rx_ring->rxbufs[idx].skb) {
- dma_unmap_single(&pdev->dev,
- rx_ring->rxbufs[idx].dma_addr,
- nn->fl_bufsz, DMA_FROM_DEVICE);
- dev_kfree_skb_any(rx_ring->rxbufs[idx].skb);
- rx_ring->rxbufs[idx].dma_addr = 0;
- rx_ring->rxbufs[idx].skb = NULL;
- }
+ memset(rx_ring->rxds, 0, sizeof(*rx_ring->rxds) * rx_ring->cnt);
+ rx_ring->wr_p = 0;
+ rx_ring->rd_p = 0;
+ rx_ring->wr_ptr_add = 0;
+}
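A concrete example of the move above (values assumed for illustration): the ring holds at most cnt - 1 buffers, so exactly one slot is always empty. With cnt = 512 and wr_p at 1000, wr_idx = 1000 % 512 = 488, so the buffer parked at the last slot (511) is moved into slot 488; after the reset the buffers once again occupy entries [0, cnt - 1) contiguously, which is the geometry nfp_net_rx_ring_bufs_free() below relies on.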
- memset(&rx_ring->rxds[idx], 0, sizeof(rx_ring->rxds[idx]));
+/**
+ * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring
+ * @nn: NFP Net device
+ * @rx_ring: RX ring to remove buffers from
+ *
+ * Assumes that the device is stopped and buffers occupy entries
+ * [0, ring->cnt - 1). After the device is disabled, nfp_net_rx_ring_reset()
+ * must be called to restore the required ring geometry.
+ */
+static void
+nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring)
+{
+ struct pci_dev *pdev = nn->pdev;
+ unsigned int i;
- rx_ring->rd_p++;
+ for (i = 0; i < rx_ring->cnt - 1; i++) {
+ /* A NULL skb can only happen when the initial filling of the ring
+ * fails to allocate enough buffers and we get called to free the
+ * ones already allocated.
+ */
+ if (!rx_ring->rxbufs[i].skb)
+ continue;
+
+ dma_unmap_single(&pdev->dev, rx_ring->rxbufs[i].dma_addr,
+ rx_ring->bufsz, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(rx_ring->rxbufs[i].skb);
+ rx_ring->rxbufs[i].dma_addr = 0;
+ rx_ring->rxbufs[i].skb = NULL;
}
}
/**
- * nfp_net_rx_fill_freelist() - Attempt filling freelist with RX buffers
- * @rx_ring: RX ring to fill
- *
- * Try to fill as many buffers as possible into freelist. Return
- * number of buffers added.
- *
- * Return: Number of freelist buffers added.
+ * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
+ * @nn: NFP Net device
+ * @rx_ring: RX ring to fill with buffers
+ *
+ * Return: 0 on success, negative errno otherwise.
*/
-static int nfp_net_rx_fill_freelist(struct nfp_net_rx_ring *rx_ring)
+static int
+nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring)
{
- struct sk_buff *skb;
- dma_addr_t dma_addr;
+ struct nfp_net_rx_buf *rxbufs;
+ unsigned int i;
+
+ rxbufs = rx_ring->rxbufs;
- while (nfp_net_rx_space(rx_ring)) {
- skb = nfp_net_rx_alloc_one(rx_ring, &dma_addr);
- if (!skb) {
- nfp_net_rx_flush(rx_ring);
+ for (i = 0; i < rx_ring->cnt - 1; i++) {
+ rxbufs[i].skb =
+ nfp_net_rx_alloc_one(rx_ring, &rxbufs[i].dma_addr,
+ rx_ring->bufsz);
+ if (!rxbufs[i].skb) {
+ nfp_net_rx_ring_bufs_free(nn, rx_ring);
return -ENOMEM;
}
- nfp_net_rx_give_one(rx_ring, skb, dma_addr);
}
return 0;
}
/**
+ * nfp_net_rx_ring_fill_freelist() - Give buffers from the ring to FW
+ * @rx_ring: RX ring to fill
+ */
+static void nfp_net_rx_ring_fill_freelist(struct nfp_net_rx_ring *rx_ring)
+{
+ unsigned int i;
+
+ for (i = 0; i < rx_ring->cnt - 1; i++)
+ nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[i].skb,
+ rx_ring->rxbufs[i].dma_addr);
+}
+
+/**
* nfp_net_rx_csum_has_errors() - group check if rxd has any csum errors
* @flags: RX descriptor flags field in CPU byte order
*/
@@ -1240,7 +1400,8 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
skb = rx_ring->rxbufs[idx].skb;
- new_skb = nfp_net_rx_alloc_one(rx_ring, &new_dma_addr);
+ new_skb = nfp_net_rx_alloc_one(rx_ring, &new_dma_addr,
+ nn->fl_bufsz);
if (!new_skb) {
nfp_net_rx_give_one(rx_ring, rx_ring->rxbufs[idx].skb,
rx_ring->rxbufs[idx].dma_addr);
@@ -1256,31 +1417,29 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
nfp_net_rx_give_one(rx_ring, new_skb, new_dma_addr);
+ /* < meta_len >
+ * <-- [rx_offset] -->
+ * ---------------------------------------------------------
+ * | [XX] | metadata | packet | XXXX |
+ * ---------------------------------------------------------
+ * <---------------- data_len --------------->
+ *
+ * The rx_offset is fixed for all packets, the meta_len can vary
+ * on a packet by packet basis. If rx_offset is set to zero
+ * (_RX_OFFSET_DYNAMIC) metadata starts at the beginning of the
+ * buffer and is immediately followed by the packet (no [XX]).
+ */
meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
data_len = le16_to_cpu(rxd->rxd.data_len);
- if (WARN_ON_ONCE(data_len > nn->fl_bufsz)) {
- dev_kfree_skb_any(skb);
- continue;
- }
-
- if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) {
- /* The packet data starts after the metadata */
+ if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
skb_reserve(skb, meta_len);
- } else {
- /* The packet data starts at a fixed offset */
+ else
skb_reserve(skb, nn->rx_offset);
- }
-
- /* Adjust the SKB for the dynamic meta data pre-pended */
skb_put(skb, data_len - meta_len);
nfp_net_set_hash(nn->netdev, skb, rxd);
- /* Pad small frames to minimum */
- if (skb_put_padto(skb, 60))
- break;
-
/* Stats update */
u64_stats_update_begin(&r_vec->rx_sync);
r_vec->rx_pkts++;
@@ -1349,10 +1508,6 @@ static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
struct nfp_net *nn = r_vec->nfp_net;
struct pci_dev *pdev = nn->pdev;
- nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(tx_ring->idx), 0);
- nn_writeb(nn, NFP_NET_CFG_TXR_SZ(tx_ring->idx), 0);
- nn_writeb(nn, NFP_NET_CFG_TXR_VEC(tx_ring->idx), 0);
-
kfree(tx_ring->txbufs);
if (tx_ring->txds)
@@ -1360,11 +1515,6 @@ static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
tx_ring->txds, tx_ring->dma);
tx_ring->cnt = 0;
- tx_ring->wr_p = 0;
- tx_ring->rd_p = 0;
- tx_ring->qcp_rd_p = 0;
- tx_ring->wr_ptr_add = 0;
-
tx_ring->txbufs = NULL;
tx_ring->txds = NULL;
tx_ring->dma = 0;
@@ -1374,17 +1524,18 @@ static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
/**
* nfp_net_tx_ring_alloc() - Allocate resource for a TX ring
* @tx_ring: TX Ring structure to allocate
+ * @cnt: Ring buffer count
*
* Return: 0 on success, negative errno otherwise.
*/
-static int nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring)
+static int nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring, u32 cnt)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
struct nfp_net *nn = r_vec->nfp_net;
struct pci_dev *pdev = nn->pdev;
int sz;
- tx_ring->cnt = nn->txd_cnt;
+ tx_ring->cnt = cnt;
tx_ring->size = sizeof(*tx_ring->txds) * tx_ring->cnt;
tx_ring->txds = dma_zalloc_coherent(&pdev->dev, tx_ring->size,
@@ -1397,11 +1548,6 @@ static int nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring)
if (!tx_ring->txbufs)
goto err_alloc;
- /* Write the DMA address, size and MSI-X info to the device */
- nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(tx_ring->idx), tx_ring->dma);
- nn_writeb(nn, NFP_NET_CFG_TXR_SZ(tx_ring->idx), ilog2(tx_ring->cnt));
- nn_writeb(nn, NFP_NET_CFG_TXR_VEC(tx_ring->idx), r_vec->irq_idx);
-
netif_set_xps_queue(nn->netdev, &r_vec->affinity_mask, tx_ring->idx);
nn_dbg(nn, "TxQ%02d: QCidx=%02d cnt=%d dma=%#llx host=%p\n",
@@ -1415,6 +1561,59 @@ err_alloc:
return -ENOMEM;
}
+static struct nfp_net_tx_ring *
+nfp_net_shadow_tx_rings_prepare(struct nfp_net *nn, u32 buf_cnt)
+{
+ struct nfp_net_tx_ring *rings;
+ unsigned int r;
+
+ rings = kcalloc(nn->num_tx_rings, sizeof(*rings), GFP_KERNEL);
+ if (!rings)
+ return NULL;
+
+ for (r = 0; r < nn->num_tx_rings; r++) {
+ nfp_net_tx_ring_init(&rings[r], nn->tx_rings[r].r_vec, r);
+
+ if (nfp_net_tx_ring_alloc(&rings[r], buf_cnt))
+ goto err_free_prev;
+ }
+
+ return rings;
+
+err_free_prev:
+ while (r--)
+ nfp_net_tx_ring_free(&rings[r]);
+ kfree(rings);
+ return NULL;
+}
+
+static struct nfp_net_tx_ring *
+nfp_net_shadow_tx_rings_swap(struct nfp_net *nn, struct nfp_net_tx_ring *rings)
+{
+ struct nfp_net_tx_ring *old = nn->tx_rings;
+ unsigned int r;
+
+ for (r = 0; r < nn->num_tx_rings; r++)
+ old[r].r_vec->tx_ring = &rings[r];
+
+ nn->tx_rings = rings;
+ return old;
+}
+
+static void
+nfp_net_shadow_tx_rings_free(struct nfp_net *nn, struct nfp_net_tx_ring *rings)
+{
+ unsigned int r;
+
+ if (!rings)
+ return;
+
+ for (r = 0; r < nn->num_tx_rings; r++)
+ nfp_net_tx_ring_free(&rings[r]);
+
+ kfree(rings);
+}
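These three helpers implement a prepare/swap/free pattern so that ring geometry can be changed with only a brief downtime. A sketch of the intended calling sequence — a hypothetical illustration, not driver code, with the restore-on-failure path trimmed (it mirrors nfp_net_set_ring_size() further down this patch):

	static int nfp_net_resize_tx_rings(struct nfp_net *nn, u32 new_txd_cnt)
	{
		struct nfp_net_tx_ring *rings;
		int err;

		rings = nfp_net_shadow_tx_rings_prepare(nn, new_txd_cnt);
		if (!rings)
			return -ENOMEM;

		nfp_net_close_stack(nn);		/* quiesce the stack */
		nfp_net_clear_config_and_disable(nn);	/* stop the firmware */

		/* swap() installs the new set and hands back the old one */
		rings = nfp_net_shadow_tx_rings_swap(nn, rings);
		nn->txd_cnt = new_txd_cnt;

		err = nfp_net_set_config_and_enable(nn);

		nfp_net_shadow_tx_rings_free(nn, rings); /* free the old set */
		nfp_net_open_stack(nn);
		return err;
	}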
+
/**
* nfp_net_rx_ring_free() - Free resources allocated to a RX ring
* @rx_ring: RX ring to free
@@ -1425,10 +1624,6 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
struct nfp_net *nn = r_vec->nfp_net;
struct pci_dev *pdev = nn->pdev;
- nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(rx_ring->idx), 0);
- nn_writeb(nn, NFP_NET_CFG_RXR_SZ(rx_ring->idx), 0);
- nn_writeb(nn, NFP_NET_CFG_RXR_VEC(rx_ring->idx), 0);
-
kfree(rx_ring->rxbufs);
if (rx_ring->rxds)
@@ -1436,10 +1631,6 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
rx_ring->rxds, rx_ring->dma);
rx_ring->cnt = 0;
- rx_ring->wr_p = 0;
- rx_ring->rd_p = 0;
- rx_ring->wr_ptr_add = 0;
-
rx_ring->rxbufs = NULL;
rx_ring->rxds = NULL;
rx_ring->dma = 0;
@@ -1449,17 +1640,22 @@ static void nfp_net_rx_ring_free(struct nfp_net_rx_ring *rx_ring)
/**
* nfp_net_rx_ring_alloc() - Allocate resource for a RX ring
* @rx_ring: RX ring to allocate
+ * @fl_bufsz: Size of buffers to allocate
+ * @cnt: Ring buffer count
*
* Return: 0 on success, negative errno otherwise.
*/
-static int nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring)
+static int
+nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring, unsigned int fl_bufsz,
+ u32 cnt)
{
struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
struct nfp_net *nn = r_vec->nfp_net;
struct pci_dev *pdev = nn->pdev;
int sz;
- rx_ring->cnt = nn->rxd_cnt;
+ rx_ring->cnt = cnt;
+ rx_ring->bufsz = fl_bufsz;
rx_ring->size = sizeof(*rx_ring->rxds) * rx_ring->cnt;
rx_ring->rxds = dma_zalloc_coherent(&pdev->dev, rx_ring->size,
@@ -1472,11 +1668,6 @@ static int nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring)
if (!rx_ring->rxbufs)
goto err_alloc;
- /* Write the DMA address, size and MSI-X info to the device */
- nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(rx_ring->idx), rx_ring->dma);
- nn_writeb(nn, NFP_NET_CFG_RXR_SZ(rx_ring->idx), ilog2(rx_ring->cnt));
- nn_writeb(nn, NFP_NET_CFG_RXR_VEC(rx_ring->idx), r_vec->irq_idx);
-
nn_dbg(nn, "RxQ%02d: FlQCidx=%02d RxQCidx=%02d cnt=%d dma=%#llx host=%p\n",
rx_ring->idx, rx_ring->fl_qcidx, rx_ring->rx_qcidx,
rx_ring->cnt, (unsigned long long)rx_ring->dma, rx_ring->rxds);
@@ -1488,91 +1679,109 @@ err_alloc:
return -ENOMEM;
}
-static void __nfp_net_free_rings(struct nfp_net *nn, unsigned int n_free)
+static struct nfp_net_rx_ring *
+nfp_net_shadow_rx_rings_prepare(struct nfp_net *nn, unsigned int fl_bufsz,
+ u32 buf_cnt)
{
- struct nfp_net_r_vector *r_vec;
- struct msix_entry *entry;
+ struct nfp_net_rx_ring *rings;
+ unsigned int r;
- while (n_free--) {
- r_vec = &nn->r_vecs[n_free];
- entry = &nn->irq_entries[r_vec->irq_idx];
+ rings = kcalloc(nn->num_rx_rings, sizeof(*rings), GFP_KERNEL);
+ if (!rings)
+ return NULL;
- nfp_net_rx_ring_free(r_vec->rx_ring);
- nfp_net_tx_ring_free(r_vec->tx_ring);
+ for (r = 0; r < nn->num_rx_rings; r++) {
+ nfp_net_rx_ring_init(&rings[r], nn->rx_rings[r].r_vec, r);
- irq_set_affinity_hint(entry->vector, NULL);
- free_irq(entry->vector, r_vec);
+ if (nfp_net_rx_ring_alloc(&rings[r], fl_bufsz, buf_cnt))
+ goto err_free_prev;
- netif_napi_del(&r_vec->napi);
+ if (nfp_net_rx_ring_bufs_alloc(nn, &rings[r]))
+ goto err_free_ring;
}
+
+ return rings;
+
+err_free_prev:
+ while (r--) {
+ nfp_net_rx_ring_bufs_free(nn, &rings[r]);
+err_free_ring:
+ nfp_net_rx_ring_free(&rings[r]);
+ }
+ kfree(rings);
+ return NULL;
}
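The unwind above jumps into the middle of the while loop: a failure of nfp_net_rx_ring_bufs_alloc() enters at err_free_ring so that ring r is freed without a matching bufs-free, after which the loop tears down the fully built rings before it. A standalone sketch of the idiom, with made-up step_a/step_b/undo_a/undo_b names rather than driver functions:

	static int build_all(unsigned int n)
	{
		unsigned int r;

		for (r = 0; r < n; r++) {
			if (step_a(r))		/* e.g. ring alloc */
				goto err_unwind_prev;
			if (step_b(r))		/* e.g. buffer alloc */
				goto err_undo_a;
		}
		return 0;

	err_unwind_prev:
		while (r--) {
			undo_b(r);
	err_undo_a:
			/* entered directly when step_b fails: undo only
			 * step_a of the failing entry, then fall into the
			 * loop for the earlier, fully-built entries
			 */
			undo_a(r);
		}
		return -ENOMEM;
	}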
-/**
- * nfp_net_free_rings() - Free all ring resources
- * @nn: NFP Net device to reconfigure
- */
-static void nfp_net_free_rings(struct nfp_net *nn)
+static struct nfp_net_rx_ring *
+nfp_net_shadow_rx_rings_swap(struct nfp_net *nn, struct nfp_net_rx_ring *rings)
{
- __nfp_net_free_rings(nn, nn->num_r_vecs);
+ struct nfp_net_rx_ring *old = nn->rx_rings;
+ unsigned int r;
+
+ for (r = 0; r < nn->num_rx_rings; r++)
+ old[r].r_vec->rx_ring = &rings[r];
+
+ nn->rx_rings = rings;
+ return old;
}
-/**
- * nfp_net_alloc_rings() - Allocate resources for RX and TX rings
- * @nn: NFP Net device to reconfigure
- *
- * Return: 0 on success or negative errno on error.
- */
-static int nfp_net_alloc_rings(struct nfp_net *nn)
+static void
+nfp_net_shadow_rx_rings_free(struct nfp_net *nn, struct nfp_net_rx_ring *rings)
{
- struct nfp_net_r_vector *r_vec;
- struct msix_entry *entry;
- int err;
- int r;
+ unsigned int r;
+
+ if (!rings)
+ return;
for (r = 0; r < nn->num_r_vecs; r++) {
- r_vec = &nn->r_vecs[r];
- entry = &nn->irq_entries[r_vec->irq_idx];
-
- /* Setup NAPI */
- netif_napi_add(nn->netdev, &r_vec->napi,
- nfp_net_poll, NAPI_POLL_WEIGHT);
-
- snprintf(r_vec->name, sizeof(r_vec->name),
- "%s-rxtx-%d", nn->netdev->name, r);
- err = request_irq(entry->vector, r_vec->handler, 0,
- r_vec->name, r_vec);
- if (err) {
- nn_dbg(nn, "Error requesting IRQ %d\n", entry->vector);
- goto err_napi_del;
- }
+ nfp_net_rx_ring_bufs_free(nn, &rings[r]);
+ nfp_net_rx_ring_free(&rings[r]);
+ }
- irq_set_affinity_hint(entry->vector, &r_vec->affinity_mask);
+ kfree(rings);
+}
- nn_dbg(nn, "RV%02d: irq=%03d/%03d\n",
- r, entry->vector, entry->entry);
+static int
+nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+ int idx)
+{
+ struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx];
+ int err;
- /* Allocate TX ring resources */
- err = nfp_net_tx_ring_alloc(r_vec->tx_ring);
- if (err)
- goto err_free_irq;
+ r_vec->tx_ring = &nn->tx_rings[idx];
+ nfp_net_tx_ring_init(r_vec->tx_ring, r_vec, idx);
- /* Allocate RX ring resources */
- err = nfp_net_rx_ring_alloc(r_vec->rx_ring);
- if (err)
- goto err_free_tx;
+ r_vec->rx_ring = &nn->rx_rings[idx];
+ nfp_net_rx_ring_init(r_vec->rx_ring, r_vec, idx);
+
+ snprintf(r_vec->name, sizeof(r_vec->name),
+ "%s-rxtx-%d", nn->netdev->name, idx);
+ err = request_irq(entry->vector, r_vec->handler, 0, r_vec->name, r_vec);
+ if (err) {
+ nn_err(nn, "Error requesting IRQ %d\n", entry->vector);
+ return err;
}
+ disable_irq(entry->vector);
+
+ /* Setup NAPI */
+ netif_napi_add(nn->netdev, &r_vec->napi,
+ nfp_net_poll, NAPI_POLL_WEIGHT);
+
+ irq_set_affinity_hint(entry->vector, &r_vec->affinity_mask);
+
+ nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, entry->vector, entry->entry);
return 0;
+}
+
+static void
+nfp_net_cleanup_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
+{
+ struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx];
-err_free_tx:
- nfp_net_tx_ring_free(r_vec->tx_ring);
-err_free_irq:
irq_set_affinity_hint(entry->vector, NULL);
- free_irq(entry->vector, r_vec);
-err_napi_del:
netif_napi_del(&r_vec->napi);
- __nfp_net_free_rings(nn, r);
- return err;
+ free_irq(entry->vector, r_vec);
}
/**
@@ -1631,13 +1840,14 @@ void nfp_net_coalesce_write_cfg(struct nfp_net *nn)
}
/**
- * nfp_net_write_mac_addr() - Write mac address to device registers
+ * nfp_net_write_mac_addr() - Write MAC address to the device control BAR
* @nn: NFP Net device to reconfigure
- * @mac: Six-byte MAC address to be written
*
- * We do a bit of byte swapping dance because firmware is LE.
+ * Writes the MAC address from the netdev to the device control BAR. Does not
+ * perform the required reconfig. We do a bit of a byte-swapping dance because
+ * the firmware is LE.
*/
-static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *mac)
+static void nfp_net_write_mac_addr(struct nfp_net *nn)
{
nn_writel(nn, NFP_NET_CFG_MACADDR + 0,
get_unaligned_be32(nn->netdev->dev_addr));
@@ -1646,6 +1856,17 @@ static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *mac)
get_unaligned_be16(nn->netdev->dev_addr + 4) << 16);
}
+static void nfp_net_vec_clear_ring_data(struct nfp_net *nn, unsigned int idx)
+{
+ nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), 0);
+ nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), 0);
+ nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), 0);
+
+ nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), 0);
+ nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), 0);
+ nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), 0);
+}
+
/**
* nfp_net_clear_config_and_disable() - Clear control BAR and disable NFP
* @nn: NFP Net device to reconfigure
@@ -1653,6 +1874,7 @@ static void nfp_net_write_mac_addr(struct nfp_net *nn, const u8 *mac)
static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
{
u32 new_ctrl, update;
+ unsigned int r;
int err;
new_ctrl = nn->ctrl;
@@ -1669,79 +1891,40 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
err = nfp_net_reconfig(nn, update);
- if (err) {
+ if (err)
nn_err(nn, "Could not disable device: %d\n", err);
- return;
+
+ for (r = 0; r < nn->num_r_vecs; r++) {
+ nfp_net_rx_ring_reset(nn->r_vecs[r].rx_ring);
+ nfp_net_tx_ring_reset(nn, nn->r_vecs[r].tx_ring);
+ nfp_net_vec_clear_ring_data(nn, r);
}
nn->ctrl = new_ctrl;
}
-/**
- * nfp_net_start_vec() - Start ring vector
- * @nn: NFP Net device structure
- * @r_vec: Ring vector to be started
- */
-static int nfp_net_start_vec(struct nfp_net *nn, struct nfp_net_r_vector *r_vec)
+static void
+nfp_net_vec_write_ring_data(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
+ unsigned int idx)
{
- unsigned int irq_vec;
- int err = 0;
-
- irq_vec = nn->irq_entries[r_vec->irq_idx].vector;
-
- disable_irq(irq_vec);
-
- err = nfp_net_rx_fill_freelist(r_vec->rx_ring);
- if (err) {
- nn_err(nn, "RV%02d: couldn't allocate enough buffers\n",
- r_vec->irq_idx);
- goto out;
- }
-
- napi_enable(&r_vec->napi);
-out:
- enable_irq(irq_vec);
+ /* Write the DMA address, size and MSI-X info to the device */
+ nn_writeq(nn, NFP_NET_CFG_RXR_ADDR(idx), r_vec->rx_ring->dma);
+ nn_writeb(nn, NFP_NET_CFG_RXR_SZ(idx), ilog2(r_vec->rx_ring->cnt));
+ nn_writeb(nn, NFP_NET_CFG_RXR_VEC(idx), r_vec->irq_idx);
- return err;
+ nn_writeq(nn, NFP_NET_CFG_TXR_ADDR(idx), r_vec->tx_ring->dma);
+ nn_writeb(nn, NFP_NET_CFG_TXR_SZ(idx), ilog2(r_vec->tx_ring->cnt));
+ nn_writeb(nn, NFP_NET_CFG_TXR_VEC(idx), r_vec->irq_idx);
}
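Note that the size registers are programmed with the base-2 logarithm of the ring entry count rather than the count itself; e.g. ilog2(4096) == 12 for a 4096-entry ring. This is also why the ethtool path below rounds requested ring sizes up to a power of two.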
-static int nfp_net_netdev_open(struct net_device *netdev)
+static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
{
- struct nfp_net *nn = netdev_priv(netdev);
- int err, r;
- u32 update = 0;
- u32 new_ctrl;
-
- if (nn->ctrl & NFP_NET_CFG_CTRL_ENABLE) {
- nn_err(nn, "Dev is already enabled: 0x%08x\n", nn->ctrl);
- return -EBUSY;
- }
+ u32 new_ctrl, update = 0;
+ unsigned int r;
+ int err;
new_ctrl = nn->ctrl;
- /* Step 1: Allocate resources for rings and the like
- * - Request interrupts
- * - Allocate RX and TX ring resources
- * - Setup initial RSS table
- */
- err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_EXN, "%s-exn",
- nn->exn_name, sizeof(nn->exn_name),
- NFP_NET_IRQ_EXN_IDX, nn->exn_handler);
- if (err)
- return err;
-
- err = nfp_net_alloc_rings(nn);
- if (err)
- goto err_free_exn;
-
- err = netif_set_real_num_tx_queues(netdev, nn->num_tx_rings);
- if (err)
- goto err_free_rings;
-
- err = netif_set_real_num_rx_queues(netdev, nn->num_rx_rings);
- if (err)
- goto err_free_rings;
-
if (nn->cap & NFP_NET_CFG_CTRL_RSS) {
nfp_net_rss_write_key(nn);
nfp_net_rss_write_itbl(nn);
@@ -1756,22 +1939,18 @@ static int nfp_net_netdev_open(struct net_device *netdev)
update |= NFP_NET_CFG_UPDATE_IRQMOD;
}
- /* Step 2: Configure the NFP
- * - Enable rings from 0 to tx_rings/rx_rings - 1.
- * - Write MAC address (in case it changed)
- * - Set the MTU
- * - Set the Freelist buffer size
- * - Enable the FW
- */
+ for (r = 0; r < nn->num_r_vecs; r++)
+ nfp_net_vec_write_ring_data(nn, &nn->r_vecs[r], r);
+
nn_writeq(nn, NFP_NET_CFG_TXRS_ENABLE, nn->num_tx_rings == 64 ?
0xffffffffffffffffULL : ((u64)1 << nn->num_tx_rings) - 1);
nn_writeq(nn, NFP_NET_CFG_RXRS_ENABLE, nn->num_rx_rings == 64 ?
0xffffffffffffffffULL : ((u64)1 << nn->num_rx_rings) - 1);
- nfp_net_write_mac_addr(nn, netdev->dev_addr);
+ nfp_net_write_mac_addr(nn);
- nn_writel(nn, NFP_NET_CFG_MTU, netdev->mtu);
+ nn_writel(nn, NFP_NET_CFG_MTU, nn->netdev->mtu);
nn_writel(nn, NFP_NET_CFG_FLBUFSZ, nn->fl_bufsz);
/* Enable device */
@@ -1784,69 +1963,217 @@ static int nfp_net_netdev_open(struct net_device *netdev)
nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
err = nfp_net_reconfig(nn, update);
- if (err)
- goto err_clear_config;
nn->ctrl = new_ctrl;
+ for (r = 0; r < nn->num_r_vecs; r++)
+ nfp_net_rx_ring_fill_freelist(nn->r_vecs[r].rx_ring);
+
/* Since reconfiguration requests while NFP is down are ignored we
* have to wipe the entire VXLAN configuration and reinitialize it.
*/
if (nn->ctrl & NFP_NET_CFG_CTRL_VXLAN) {
memset(&nn->vxlan_ports, 0, sizeof(nn->vxlan_ports));
memset(&nn->vxlan_usecnt, 0, sizeof(nn->vxlan_usecnt));
- vxlan_get_rx_port(netdev);
+ udp_tunnel_get_rx_info(nn->netdev);
}
- /* Step 3: Enable for kernel
- * - put some freelist descriptors on each RX ring
- * - enable NAPI on each ring
- * - enable all TX queues
- * - set link state
- */
+ return err;
+}
+
+/**
+ * nfp_net_set_config_and_enable() - Write control BAR and enable NFP
+ * @nn: NFP Net device to reconfigure
+ */
+static int nfp_net_set_config_and_enable(struct nfp_net *nn)
+{
+ int err;
+
+ err = __nfp_net_set_config_and_enable(nn);
+ if (err)
+ nfp_net_clear_config_and_disable(nn);
+
+ return err;
+}
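The TXRS_ENABLE/RXRS_ENABLE writes in __nfp_net_set_config_and_enable() build a bitmask with one bit per active ring; the 64-ring case is special-cased because shifting a 64-bit value by 64 is undefined behaviour in C. A sketch of the same computation (not a driver helper):

	static u64 nfp_ring_enable_mask(unsigned int n_rings)
	{
		/* e.g. 8 rings -> 0xff; 64 rings would need a shift by 64
		 * (undefined), so return the all-ones mask explicitly
		 */
		if (n_rings == 64)
			return 0xffffffffffffffffULL;
		return ((u64)1 << n_rings) - 1;
	}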
+
+/**
+ * nfp_net_open_stack() - Start the device from stack's perspective
+ * @nn: NFP Net device to reconfigure
+ */
+static void nfp_net_open_stack(struct nfp_net *nn)
+{
+ unsigned int r;
+
for (r = 0; r < nn->num_r_vecs; r++) {
- err = nfp_net_start_vec(nn, &nn->r_vecs[r]);
- if (err)
- goto err_disable_napi;
+ napi_enable(&nn->r_vecs[r].napi);
+ enable_irq(nn->irq_entries[nn->r_vecs[r].irq_idx].vector);
}
- netif_tx_wake_all_queues(netdev);
+ netif_tx_wake_all_queues(nn->netdev);
+
+ enable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
+ nfp_net_read_link_status(nn);
+}
+
+static int nfp_net_netdev_open(struct net_device *netdev)
+{
+ struct nfp_net *nn = netdev_priv(netdev);
+ int err, r;
+
+ if (nn->ctrl & NFP_NET_CFG_CTRL_ENABLE) {
+ nn_err(nn, "Dev is already enabled: 0x%08x\n", nn->ctrl);
+ return -EBUSY;
+ }
+ /* Step 1: Allocate resources for rings and the like
+ * - Request interrupts
+ * - Allocate RX and TX ring resources
+ * - Setup initial RSS table
+ */
+ err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_EXN, "%s-exn",
+ nn->exn_name, sizeof(nn->exn_name),
+ NFP_NET_IRQ_EXN_IDX, nn->exn_handler);
+ if (err)
+ return err;
err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_LSC, "%s-lsc",
nn->lsc_name, sizeof(nn->lsc_name),
NFP_NET_IRQ_LSC_IDX, nn->lsc_handler);
if (err)
- goto err_stop_tx;
- nfp_net_read_link_status(nn);
+ goto err_free_exn;
+ disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
- return 0;
+ nn->rx_rings = kcalloc(nn->num_rx_rings, sizeof(*nn->rx_rings),
+ GFP_KERNEL);
+ if (!nn->rx_rings) {
+ err = -ENOMEM;
+ goto err_free_lsc;
+ }
+ nn->tx_rings = kcalloc(nn->num_tx_rings, sizeof(*nn->tx_rings),
+ GFP_KERNEL);
+ if (!nn->tx_rings) {
+ err = -ENOMEM;
+ goto err_free_rx_rings;
+ }
-err_stop_tx:
- netif_tx_disable(netdev);
- for (r = 0; r < nn->num_r_vecs; r++)
- nfp_net_tx_flush(nn->r_vecs[r].tx_ring);
-err_disable_napi:
- while (r--) {
- napi_disable(&nn->r_vecs[r].napi);
- nfp_net_rx_flush(nn->r_vecs[r].rx_ring);
+ for (r = 0; r < nn->num_r_vecs; r++) {
+ err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
+ if (err)
+ goto err_free_prev_vecs;
+
+ err = nfp_net_tx_ring_alloc(nn->r_vecs[r].tx_ring, nn->txd_cnt);
+ if (err)
+ goto err_cleanup_vec_p;
+
+ err = nfp_net_rx_ring_alloc(nn->r_vecs[r].rx_ring,
+ nn->fl_bufsz, nn->rxd_cnt);
+ if (err)
+ goto err_free_tx_ring_p;
+
+ err = nfp_net_rx_ring_bufs_alloc(nn, nn->r_vecs[r].rx_ring);
+ if (err)
+ goto err_flush_rx_ring_p;
}
-err_clear_config:
- nfp_net_clear_config_and_disable(nn);
+
+ err = netif_set_real_num_tx_queues(netdev, nn->num_tx_rings);
+ if (err)
+ goto err_free_rings;
+
+ err = netif_set_real_num_rx_queues(netdev, nn->num_rx_rings);
+ if (err)
+ goto err_free_rings;
+
+ /* Step 2: Configure the NFP
+ * - Enable rings from 0 to tx_rings/rx_rings - 1.
+ * - Write MAC address (in case it changed)
+ * - Set the MTU
+ * - Set the Freelist buffer size
+ * - Enable the FW
+ */
+ err = nfp_net_set_config_and_enable(nn);
+ if (err)
+ goto err_free_rings;
+
+ /* Step 3: Enable for kernel
+ * - put some freelist descriptors on each RX ring
+ * - enable NAPI on each ring
+ * - enable all TX queues
+ * - set link state
+ */
+ nfp_net_open_stack(nn);
+
+ return 0;
+
err_free_rings:
- nfp_net_free_rings(nn);
+ r = nn->num_r_vecs;
+err_free_prev_vecs:
+ while (r--) {
+ nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring);
+err_flush_rx_ring_p:
+ nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring);
+err_free_tx_ring_p:
+ nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring);
+err_cleanup_vec_p:
+ nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
+ }
+ kfree(nn->tx_rings);
+err_free_rx_rings:
+ kfree(nn->rx_rings);
+err_free_lsc:
+ nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
err_free_exn:
nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
return err;
}
/**
+ * nfp_net_close_stack() - Quiesce the stack (part of close)
+ * @nn: NFP Net device to reconfigure
+ */
+static void nfp_net_close_stack(struct nfp_net *nn)
+{
+ unsigned int r;
+
+ disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
+ netif_carrier_off(nn->netdev);
+ nn->link_up = false;
+
+ for (r = 0; r < nn->num_r_vecs; r++) {
+ disable_irq(nn->irq_entries[nn->r_vecs[r].irq_idx].vector);
+ napi_disable(&nn->r_vecs[r].napi);
+ }
+
+ netif_tx_disable(nn->netdev);
+}
+
+/**
+ * nfp_net_close_free_all() - Free all runtime resources
+ * @nn: NFP Net device to reconfigure
+ */
+static void nfp_net_close_free_all(struct nfp_net *nn)
+{
+ unsigned int r;
+
+ for (r = 0; r < nn->num_r_vecs; r++) {
+ nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring);
+ nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring);
+ nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring);
+ nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
+ }
+
+ kfree(nn->rx_rings);
+ kfree(nn->tx_rings);
+
+ nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
+ nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
+}
+
+/**
* nfp_net_netdev_close() - Called when the device is downed
* @netdev: netdev structure
*/
static int nfp_net_netdev_close(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
- int r;
if (!(nn->ctrl & NFP_NET_CFG_CTRL_ENABLE)) {
nn_err(nn, "Dev is not up: 0x%08x\n", nn->ctrl);
@@ -1855,14 +2182,7 @@ static int nfp_net_netdev_close(struct net_device *netdev)
/* Step 1: Disable RX and TX rings from the Linux kernel perspective
*/
- nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
- netif_carrier_off(netdev);
- nn->link_up = false;
-
- for (r = 0; r < nn->num_r_vecs; r++)
- napi_disable(&nn->r_vecs[r].napi);
-
- netif_tx_disable(netdev);
+ nfp_net_close_stack(nn);
/* Step 2: Tell NFP
*/
@@ -1870,13 +2190,7 @@ static int nfp_net_netdev_close(struct net_device *netdev)
/* Step 3: Free resources
*/
- for (r = 0; r < nn->num_r_vecs; r++) {
- nfp_net_rx_flush(nn->r_vecs[r].rx_ring);
- nfp_net_tx_flush(nn->r_vecs[r].tx_ring);
- }
-
- nfp_net_free_rings(nn);
- nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
+ nfp_net_close_free_all(nn);
nn_dbg(nn, "%s down", netdev->name);
return 0;
@@ -1902,37 +2216,139 @@ static void nfp_net_set_rx_mode(struct net_device *netdev)
return;
nn_writel(nn, NFP_NET_CFG_CTRL, new_ctrl);
- if (nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN))
- return;
+ nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_GEN);
nn->ctrl = new_ctrl;
}
static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
{
+ unsigned int old_mtu, old_fl_bufsz, new_fl_bufsz;
struct nfp_net *nn = netdev_priv(netdev);
- u32 tmp;
-
- nn_dbg(nn, "New MTU = %d\n", new_mtu);
+ struct nfp_net_rx_ring *tmp_rings;
+ int err;
if (new_mtu < 68 || new_mtu > nn->max_mtu) {
nn_err(nn, "New MTU (%d) is not valid\n", new_mtu);
return -EINVAL;
}
+ old_mtu = netdev->mtu;
+ old_fl_bufsz = nn->fl_bufsz;
+ new_fl_bufsz = NFP_NET_MAX_PREPEND + ETH_HLEN + VLAN_HLEN * 2 + new_mtu;
+
+ if (!netif_running(netdev)) {
+ netdev->mtu = new_mtu;
+ nn->fl_bufsz = new_fl_bufsz;
+ return 0;
+ }
+
+ /* Prepare new rings */
+ tmp_rings = nfp_net_shadow_rx_rings_prepare(nn, new_fl_bufsz,
+ nn->rxd_cnt);
+ if (!tmp_rings)
+ return -ENOMEM;
+
+ /* Stop device, swap in new rings, try to start the firmware */
+ nfp_net_close_stack(nn);
+ nfp_net_clear_config_and_disable(nn);
+
+ tmp_rings = nfp_net_shadow_rx_rings_swap(nn, tmp_rings);
+
netdev->mtu = new_mtu;
+ nn->fl_bufsz = new_fl_bufsz;
+
+ err = nfp_net_set_config_and_enable(nn);
+ if (err) {
+ const int err_new = err;
+
+ /* Try with old configuration and old rings */
+ tmp_rings = nfp_net_shadow_rx_rings_swap(nn, tmp_rings);
- /* Freelist buffer size rounded up to the nearest 1K */
- tmp = new_mtu + ETH_HLEN + VLAN_HLEN + NFP_NET_MAX_PREPEND;
- nn->fl_bufsz = roundup(tmp, 1024);
+ netdev->mtu = old_mtu;
+ nn->fl_bufsz = old_fl_bufsz;
- /* restart if running */
- if (netif_running(netdev)) {
- nfp_net_netdev_close(netdev);
- nfp_net_netdev_open(netdev);
+ err = __nfp_net_set_config_and_enable(nn);
+ if (err)
+ nn_err(nn, "Can't restore MTU - FW communication failed (%d,%d)\n",
+ err_new, err);
}
- return 0;
+ nfp_net_shadow_rx_rings_free(nn, tmp_rings);
+
+ nfp_net_open_stack(nn);
+
+ return err;
+}
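Worked example of the new freelist buffer size computed near the top of the function, assuming NFP_NET_MAX_PREPEND is 64 (its value is not shown in this diff): for new_mtu = 1500 the size is 64 + 14 (ETH_HLEN) + 2 * 4 (two VLAN tags) + 1500 = 1586 bytes. The removed code rounded its result up to the nearest 1 KiB (here 2048), so the exact-size computation saves memory on every RX buffer.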
+
+int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
+{
+ struct nfp_net_tx_ring *tx_rings = NULL;
+ struct nfp_net_rx_ring *rx_rings = NULL;
+ u32 old_rxd_cnt, old_txd_cnt;
+ int err;
+
+ if (!netif_running(nn->netdev)) {
+ nn->rxd_cnt = rxd_cnt;
+ nn->txd_cnt = txd_cnt;
+ return 0;
+ }
+
+ old_rxd_cnt = nn->rxd_cnt;
+ old_txd_cnt = nn->txd_cnt;
+
+ /* Prepare new rings */
+ if (nn->rxd_cnt != rxd_cnt) {
+ rx_rings = nfp_net_shadow_rx_rings_prepare(nn, nn->fl_bufsz,
+ rxd_cnt);
+ if (!rx_rings)
+ return -ENOMEM;
+ }
+ if (nn->txd_cnt != txd_cnt) {
+ tx_rings = nfp_net_shadow_tx_rings_prepare(nn, txd_cnt);
+ if (!tx_rings) {
+ nfp_net_shadow_rx_rings_free(nn, rx_rings);
+ return -ENOMEM;
+ }
+ }
+
+ /* Stop device, swap in new rings, try to start the firmware */
+ nfp_net_close_stack(nn);
+ nfp_net_clear_config_and_disable(nn);
+
+ if (rx_rings)
+ rx_rings = nfp_net_shadow_rx_rings_swap(nn, rx_rings);
+ if (tx_rings)
+ tx_rings = nfp_net_shadow_tx_rings_swap(nn, tx_rings);
+
+ nn->rxd_cnt = rxd_cnt;
+ nn->txd_cnt = txd_cnt;
+
+ err = nfp_net_set_config_and_enable(nn);
+ if (err) {
+ const int err_new = err;
+
+ /* Try with old configuration and old rings */
+ if (rx_rings)
+ rx_rings = nfp_net_shadow_rx_rings_swap(nn, rx_rings);
+ if (tx_rings)
+ tx_rings = nfp_net_shadow_tx_rings_swap(nn, tx_rings);
+
+ nn->rxd_cnt = old_rxd_cnt;
+ nn->txd_cnt = old_txd_cnt;
+
+ err = __nfp_net_set_config_and_enable(nn);
+ if (err)
+ nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n",
+ err_new, err);
+ }
+
+ nfp_net_shadow_rx_rings_free(nn, rx_rings);
+ nfp_net_shadow_tx_rings_free(nn, tx_rings);
+
+ nfp_net_open_stack(nn);
+
+ return err;
}
static struct rtnl_link_stats64 *nfp_net_stat64(struct net_device *netdev,
@@ -2108,7 +2524,7 @@ static void nfp_net_set_vxlan_port(struct nfp_net *nn, int idx, __be16 port)
be16_to_cpu(nn->vxlan_ports[i + 1]) << 16 |
be16_to_cpu(nn->vxlan_ports[i]));
- nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_VXLAN);
+ nfp_net_reconfig_post(nn, NFP_NET_CFG_UPDATE_VXLAN);
}
/**
@@ -2135,27 +2551,33 @@ static int nfp_net_find_vxlan_idx(struct nfp_net *nn, __be16 port)
}
static void nfp_net_add_vxlan_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
+ struct udp_tunnel_info *ti)
{
struct nfp_net *nn = netdev_priv(netdev);
int idx;
- idx = nfp_net_find_vxlan_idx(nn, port);
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
+
+ idx = nfp_net_find_vxlan_idx(nn, ti->port);
if (idx == -ENOSPC)
return;
if (!nn->vxlan_usecnt[idx]++)
- nfp_net_set_vxlan_port(nn, idx, port);
+ nfp_net_set_vxlan_port(nn, idx, ti->port);
}
static void nfp_net_del_vxlan_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
+ struct udp_tunnel_info *ti)
{
struct nfp_net *nn = netdev_priv(netdev);
int idx;
- idx = nfp_net_find_vxlan_idx(nn, port);
- if (!nn->vxlan_usecnt[idx] || idx == -ENOSPC)
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
+
+ idx = nfp_net_find_vxlan_idx(nn, ti->port);
+ if (idx == -ENOSPC || !nn->vxlan_usecnt[idx])
return;
if (!--nn->vxlan_usecnt[idx])
@@ -2173,8 +2595,8 @@ static const struct net_device_ops nfp_net_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_set_features = nfp_net_set_features,
.ndo_features_check = nfp_net_features_check,
- .ndo_add_vxlan_port = nfp_net_add_vxlan_port,
- .ndo_del_vxlan_port = nfp_net_del_vxlan_port,
+ .ndo_udp_tunnel_add = nfp_net_add_vxlan_port,
+ .ndo_udp_tunnel_del = nfp_net_del_vxlan_port,
};
/**
@@ -2254,6 +2676,9 @@ struct nfp_net *nfp_net_netdev_alloc(struct pci_dev *pdev,
spin_lock_init(&nn->reconfig_lock);
spin_lock_init(&nn->link_status_lock);
+ setup_timer(&nn->reconfig_timer,
+ nfp_net_reconfig_timer, (unsigned long)nn);
+
return nn;
}
@@ -2314,7 +2739,7 @@ int nfp_net_netdev_init(struct net_device *netdev)
nn->cap = nn_readl(nn, NFP_NET_CFG_CAP);
nn->max_mtu = nn_readl(nn, NFP_NET_CFG_MAX_MTU);
- nfp_net_write_mac_addr(nn, nn->netdev->dev_addr);
+ nfp_net_write_mac_addr(nn);
/* Set default MTU and Freelist buffer size */
if (nn->max_mtu < NFP_NET_DEFAULT_MTU)
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 8692003aeed8..ad6c4e31cedd 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -81,14 +81,10 @@
/**
* @NFP_NET_TXR_MAX: Maximum number of TX rings
- * @NFP_NET_TXR_MASK: Mask for TX rings
* @NFP_NET_RXR_MAX: Maximum number of RX rings
- * @NFP_NET_RXR_MASK: Mask for RX rings
*/
#define NFP_NET_TXR_MAX 64
-#define NFP_NET_TXR_MASK (NFP_NET_TXR_MAX - 1)
#define NFP_NET_RXR_MAX 64
-#define NFP_NET_RXR_MASK (NFP_NET_RXR_MAX - 1)
/**
* Read/Write config words (0x0000 - 0x002c)
@@ -152,9 +148,9 @@
* @NFP_NET_CFG_VERSION: Firmware version number
* @NFP_NET_CFG_STS: Status
* @NFP_NET_CFG_CAP: Capabilities (same bits as @NFP_NET_CFG_CTRL)
- * @NFP_NET_MAX_TXRINGS: Maximum number of TX rings
- * @NFP_NET_MAX_RXRINGS: Maximum number of RX rings
- * @NFP_NET_MAX_MTU: Maximum support MTU
+ * @NFP_NET_CFG_MAX_TXRINGS: Maximum number of TX rings
+ * @NFP_NET_CFG_MAX_RXRINGS: Maximum number of RX rings
+ * @NFP_NET_CFG_MAX_MTU: Maximum supported MTU
* @NFP_NET_CFG_START_TXQ: Start Queue Control Queue to use for TX (PF only)
* @NFP_NET_CFG_START_RXQ: Start Queue Control Queue to use for RX (PF only)
*
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
index 4c97c713121c..f7c9a5bc4aa3 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c
@@ -40,8 +40,9 @@ static struct dentry *nfp_dir;
static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data)
{
- struct nfp_net_rx_ring *rx_ring = file->private;
int fl_rd_p, fl_wr_p, rx_rd_p, rx_wr_p, rxd_cnt;
+ struct nfp_net_r_vector *r_vec = file->private;
+ struct nfp_net_rx_ring *rx_ring;
struct nfp_net_rx_desc *rxd;
struct sk_buff *skb;
struct nfp_net *nn;
@@ -49,9 +50,10 @@ static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data)
rtnl_lock();
- if (!rx_ring->r_vec || !rx_ring->r_vec->nfp_net)
+ if (!r_vec->nfp_net || !r_vec->rx_ring)
goto out;
- nn = rx_ring->r_vec->nfp_net;
+ nn = r_vec->nfp_net;
+ rx_ring = r_vec->rx_ring;
if (!netif_running(nn->netdev))
goto out;
@@ -115,7 +117,8 @@ static const struct file_operations nfp_rx_q_fops = {
static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data)
{
- struct nfp_net_tx_ring *tx_ring = file->private;
+ struct nfp_net_r_vector *r_vec = file->private;
+ struct nfp_net_tx_ring *tx_ring;
struct nfp_net_tx_desc *txd;
int d_rd_p, d_wr_p, txd_cnt;
struct sk_buff *skb;
@@ -124,9 +127,10 @@ static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data)
rtnl_lock();
- if (!tx_ring->r_vec || !tx_ring->r_vec->nfp_net)
+ if (!r_vec->nfp_net || !r_vec->tx_ring)
goto out;
- nn = tx_ring->r_vec->nfp_net;
+ nn = r_vec->nfp_net;
+ tx_ring = r_vec->tx_ring;
if (!netif_running(nn->netdev))
goto out;
@@ -183,7 +187,7 @@ static const struct file_operations nfp_tx_q_fops = {
void nfp_net_debugfs_adapter_add(struct nfp_net *nn)
{
- static struct dentry *queues, *tx, *rx;
+ struct dentry *queues, *tx, *rx;
char int_name[16];
int i;
@@ -196,7 +200,7 @@ void nfp_net_debugfs_adapter_add(struct nfp_net *nn)
/* Create queue debugging sub-tree */
queues = debugfs_create_dir("queue", nn->debugfs_dir);
- if (IS_ERR_OR_NULL(nn->debugfs_dir))
+ if (IS_ERR_OR_NULL(queues))
return;
rx = debugfs_create_dir("rx", queues);
@@ -207,13 +211,13 @@ void nfp_net_debugfs_adapter_add(struct nfp_net *nn)
for (i = 0; i < nn->num_rx_rings; i++) {
sprintf(int_name, "%d", i);
debugfs_create_file(int_name, S_IRUSR, rx,
- &nn->rx_rings[i], &nfp_rx_q_fops);
+ &nn->r_vecs[i], &nfp_rx_q_fops);
}
for (i = 0; i < nn->num_tx_rings; i++) {
sprintf(int_name, "%d", i);
debugfs_create_file(int_name, S_IRUSR, tx,
- &nn->tx_rings[i], &nfp_tx_q_fops);
+ &nn->r_vecs[i], &nfp_tx_q_fops);
}
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 9a4084a68db5..4c9897220969 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -40,7 +40,6 @@
* Brad Petrus <brad.petrus@netronome.com>
*/
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -153,37 +152,25 @@ static int nfp_net_set_ringparam(struct net_device *netdev,
struct nfp_net *nn = netdev_priv(netdev);
u32 rxd_cnt, txd_cnt;
- if (netif_running(netdev)) {
- /* Some NIC drivers allow reconfiguration on the fly,
- * some down the interface, change and then up it
- * again. For now we don't allow changes when the
- * device is up.
- */
- nn_warn(nn, "Can't change rings while device is up\n");
- return -EBUSY;
- }
-
/* We don't have separate queues/rings for small/large frames. */
if (ring->rx_mini_pending || ring->rx_jumbo_pending)
return -EINVAL;
/* Round up to supported values */
rxd_cnt = roundup_pow_of_two(ring->rx_pending);
- rxd_cnt = max_t(u32, rxd_cnt, NFP_NET_MIN_RX_DESCS);
- rxd_cnt = min_t(u32, rxd_cnt, NFP_NET_MAX_RX_DESCS);
-
txd_cnt = roundup_pow_of_two(ring->tx_pending);
- txd_cnt = max_t(u32, txd_cnt, NFP_NET_MIN_TX_DESCS);
- txd_cnt = min_t(u32, txd_cnt, NFP_NET_MAX_TX_DESCS);
- if (nn->rxd_cnt != rxd_cnt || nn->txd_cnt != txd_cnt)
- nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n",
- nn->rxd_cnt, rxd_cnt, nn->txd_cnt, txd_cnt);
+ if (rxd_cnt < NFP_NET_MIN_RX_DESCS || rxd_cnt > NFP_NET_MAX_RX_DESCS ||
+ txd_cnt < NFP_NET_MIN_TX_DESCS || txd_cnt > NFP_NET_MAX_TX_DESCS)
+ return -EINVAL;
- nn->rxd_cnt = rxd_cnt;
- nn->txd_cnt = txd_cnt;
+ if (nn->rxd_cnt == rxd_cnt && nn->txd_cnt == txd_cnt)
+ return 0;
- return 0;
+ nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n",
+ nn->rxd_cnt, rxd_cnt, nn->txd_cnt, txd_cnt);
+
+ return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt);
}
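For example (illustrative request): rx_pending = 1000 rounds up to rxd_cnt = 1024, which is then applied live via nfp_net_set_ring_size(); a request that rounds outside the [NFP_NET_MIN_RX_DESCS, NFP_NET_MAX_RX_DESCS] window now fails with -EINVAL instead of being silently clamped as before.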
static void nfp_net_get_strings(struct net_device *netdev,
@@ -617,6 +604,7 @@ static int nfp_net_set_coalesce(struct net_device *netdev,
static const struct ethtool_ops nfp_net_ethtool_ops = {
.get_drvinfo = nfp_net_get_drvinfo,
+ .get_link = ethtool_op_get_link,
.get_ringparam = nfp_net_get_ringparam,
.set_ringparam = nfp_net_set_ringparam,
.get_strings = nfp_net_get_strings,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index e2b22b8a20f1..f7062cb648e1 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -38,7 +38,6 @@
* Rolf Neugebauer <rolf.neugebauer@netronome.com>
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -124,17 +123,17 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
* first NFP_NET_CFG_BAR_SZ of the BAR. This keeps the code
* identical for PF and VF drivers.
*/
- ctrl_bar = ioremap_nocache(pci_resource_start(pdev, NFP_NET_CRTL_BAR),
+ ctrl_bar = ioremap_nocache(pci_resource_start(pdev, NFP_NET_CTRL_BAR),
NFP_NET_CFG_BAR_SZ);
if (!ctrl_bar) {
dev_err(&pdev->dev,
- "Failed to map resource %d\n", NFP_NET_CRTL_BAR);
+ "Failed to map resource %d\n", NFP_NET_CTRL_BAR);
err = -EIO;
goto err_pci_regions;
}
nfp_net_get_fw_version(&fw_ver, ctrl_bar);
- if (fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
+ if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
dev_err(&pdev->dev, "Unknown Firmware ABI %d.%d.%d.%d\n",
fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
err = -EINVAL;
@@ -142,9 +141,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
}
/* Determine stride */
- if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 0) ||
- nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1) ||
- nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0x12, 0x48)) {
+ if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) {
stride = 2;
tx_bar_no = NFP_NET_Q0_BAR;
rx_bar_no = NFP_NET_Q1_BAR;
diff --git a/drivers/net/ethernet/netx-eth.c b/drivers/net/ethernet/netx-eth.c
index 9fbc30264237..adbc47f2d132 100644
--- a/drivers/net/ethernet/netx-eth.c
+++ b/drivers/net/ethernet/netx-eth.c
@@ -313,7 +313,8 @@ static int netx_eth_enable(struct net_device *ndev)
{
struct netx_eth_priv *priv = netdev_priv(ndev);
unsigned int mac4321, mac65;
- int running, i;
+ int running, i, ret;
+ bool inv_mac_addr = false;
ndev->netdev_ops = &netx_eth_netdev_ops;
ndev->watchdog_timeo = msecs_to_jiffies(5000);
@@ -358,15 +359,18 @@ static int netx_eth_enable(struct net_device *ndev)
xc_start(priv->xc);
if (!is_valid_ether_addr(ndev->dev_addr))
- printk("%s: Invalid ethernet MAC address. Please "
- "set using ifconfig\n", ndev->name);
+ inv_mac_addr = true;
for (i=2; i<=18; i++)
pfifo_push(EMPTY_PTR_FIFO(priv->id),
FIFO_PTR_FRAMENO(i) | FIFO_PTR_SEGMENT(priv->id));
- return register_netdev(ndev);
+ ret = register_netdev(ndev);
+ if (inv_mac_addr)
+ printk("%s: Invalid ethernet MAC address. Please set using ip\n",
+ ndev->name);
+ return ret;
}
static int netx_eth_drv_probe(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index 52d9a94aebb9..87b7b814778b 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -476,7 +476,7 @@ static void w90p910_reset_mac(struct net_device *dev)
w90p910_init_desc(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
ether->cur_tx = 0x0;
ether->finish_tx = 0x0;
ether->cur_rx = 0x0;
@@ -490,7 +490,7 @@ static void w90p910_reset_mac(struct net_device *dev)
w90p910_trigger_tx(dev);
w90p910_trigger_rx(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
if (netif_queue_stopped(dev))
netif_wake_queue(dev);
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index b1ce7aaa8f8b..8e13ec84c538 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -425,7 +425,6 @@ struct netdata_local {
unsigned int last_tx_idx;
unsigned int num_used_tx_buffs;
struct mii_bus *mii_bus;
- struct phy_device *phy_dev;
struct clk *clk;
dma_addr_t dma_buff_base_p;
void *dma_buff_base_v;
@@ -476,14 +475,6 @@ static void __lpc_get_mac(struct netdata_local *pldat, u8 *mac)
mac[5] = tmp >> 8;
}
-static void __lpc_eth_clock_enable(struct netdata_local *pldat, bool enable)
-{
- if (enable)
- clk_prepare_enable(pldat->clk);
- else
- clk_disable_unprepare(pldat->clk);
-}
-
static void __lpc_params_setup(struct netdata_local *pldat)
{
u32 tmp;
@@ -750,7 +741,7 @@ static int lpc_mdio_reset(struct mii_bus *bus)
static void lpc_handle_link_change(struct net_device *ndev)
{
struct netdata_local *pldat = netdev_priv(ndev);
- struct phy_device *phydev = pldat->phy_dev;
+ struct phy_device *phydev = ndev->phydev;
unsigned long flags;
bool status_change = false;
@@ -814,7 +805,6 @@ static int lpc_mii_probe(struct net_device *ndev)
pldat->link = 0;
pldat->speed = 0;
pldat->duplex = -1;
- pldat->phy_dev = phydev;
phy_attached_info(phydev);
@@ -1048,8 +1038,8 @@ static int lpc_eth_close(struct net_device *ndev)
napi_disable(&pldat->napi);
netif_stop_queue(ndev);
- if (pldat->phy_dev)
- phy_stop(pldat->phy_dev);
+ if (ndev->phydev)
+ phy_stop(ndev->phydev);
spin_lock_irqsave(&pldat->lock, flags);
__lpc_eth_reset(pldat);
@@ -1058,7 +1048,7 @@ static int lpc_eth_close(struct net_device *ndev)
writel(0, LPC_ENET_MAC2(pldat->net_base));
spin_unlock_irqrestore(&pldat->lock, flags);
- __lpc_eth_clock_enable(pldat, false);
+ clk_disable_unprepare(pldat->clk);
return 0;
}
@@ -1185,8 +1175,7 @@ static void lpc_eth_set_multicast_list(struct net_device *ndev)
static int lpc_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
{
- struct netdata_local *pldat = netdev_priv(ndev);
- struct phy_device *phydev = pldat->phy_dev;
+ struct phy_device *phydev = ndev->phydev;
if (!netif_running(ndev))
return -EINVAL;
@@ -1200,21 +1189,24 @@ static int lpc_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
static int lpc_eth_open(struct net_device *ndev)
{
struct netdata_local *pldat = netdev_priv(ndev);
+ int ret;
if (netif_msg_ifup(pldat))
dev_dbg(&pldat->pdev->dev, "enabling %s\n", ndev->name);
- __lpc_eth_clock_enable(pldat, true);
+ ret = clk_prepare_enable(pldat->clk);
+ if (ret)
+ return ret;
/* Suspended PHY makes LPC ethernet core block, so resume now */
- phy_resume(pldat->phy_dev);
+ phy_resume(ndev->phydev);
/* Reset and initialize */
__lpc_eth_reset(pldat);
__lpc_eth_init(pldat);
/* schedule a link state check */
- phy_start(pldat->phy_dev);
+ phy_start(ndev->phydev);
netif_start_queue(ndev);
napi_enable(&pldat->napi);
@@ -1247,37 +1239,13 @@ static void lpc_eth_ethtool_setmsglevel(struct net_device *ndev, u32 level)
pldat->msg_enable = level;
}
-static int lpc_eth_ethtool_getsettings(struct net_device *ndev,
- struct ethtool_cmd *cmd)
-{
- struct netdata_local *pldat = netdev_priv(ndev);
- struct phy_device *phydev = pldat->phy_dev;
-
- if (!phydev)
- return -EOPNOTSUPP;
-
- return phy_ethtool_gset(phydev, cmd);
-}
-
-static int lpc_eth_ethtool_setsettings(struct net_device *ndev,
- struct ethtool_cmd *cmd)
-{
- struct netdata_local *pldat = netdev_priv(ndev);
- struct phy_device *phydev = pldat->phy_dev;
-
- if (!phydev)
- return -EOPNOTSUPP;
-
- return phy_ethtool_sset(phydev, cmd);
-}
-
static const struct ethtool_ops lpc_eth_ethtool_ops = {
.get_drvinfo = lpc_eth_ethtool_getdrvinfo,
- .get_settings = lpc_eth_ethtool_getsettings,
- .set_settings = lpc_eth_ethtool_setsettings,
.get_msglevel = lpc_eth_ethtool_getmsglevel,
.set_msglevel = lpc_eth_ethtool_setmsglevel,
.get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static const struct net_device_ops lpc_netdev_ops = {
@@ -1347,7 +1315,9 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
}
/* Enable network clock */
- __lpc_eth_clock_enable(pldat, true);
+ ret = clk_prepare_enable(pldat->clk);
+ if (ret)
+ goto err_out_clk_put;
/* Map IO space */
pldat->net_base = ioremap(res->start, resource_size(res));
@@ -1460,7 +1430,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
netdev_info(ndev, "LPC mac at 0x%08x irq %d\n",
res->start, ndev->irq);
- phydev = pldat->phy_dev;
+ phydev = ndev->phydev;
device_init_wakeup(&pdev->dev, 1);
device_set_wakeup_enable(&pdev->dev, 0);
@@ -1481,6 +1451,7 @@ err_out_iounmap:
iounmap(pldat->net_base);
err_out_disable_clocks:
clk_disable_unprepare(pldat->clk);
+err_out_clk_put:
clk_put(pldat->clk);
err_out_free_dev:
free_netdev(ndev);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
index 2a55d6d53ee6..8d710a3b4db0 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe.h
@@ -481,7 +481,6 @@ struct pch_gbe_buffer {
/**
* struct pch_gbe_tx_ring - tx ring information
- * @tx_lock: spinlock structs
* @desc: pointer to the descriptor ring memory
* @dma: physical address of the descriptor ring
* @size: length of descriptor ring in bytes
@@ -491,7 +490,6 @@ struct pch_gbe_buffer {
* @buffer_info: array of buffer information structs
*/
struct pch_gbe_tx_ring {
- spinlock_t tx_lock;
struct pch_gbe_tx_desc *desc;
dma_addr_t dma;
unsigned int size;
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 3b98b263bad0..3cd87a41ac92 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -1640,7 +1640,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
cleaned_count);
if (cleaned_count > 0) { /*skip this if nothing cleaned*/
/* Recover from running out of Tx resources in xmit_frame */
- spin_lock(&tx_ring->tx_lock);
+ netif_tx_lock(adapter->netdev);
if (unlikely(cleaned && (netif_queue_stopped(adapter->netdev))))
{
netif_wake_queue(adapter->netdev);
@@ -1652,7 +1652,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter,
netdev_dbg(adapter->netdev, "next_to_clean : %d\n",
tx_ring->next_to_clean);
- spin_unlock(&tx_ring->tx_lock);
+ netif_tx_unlock(adapter->netdev);
}
return cleaned;
}
@@ -1805,7 +1805,6 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
- spin_lock_init(&tx_ring->tx_lock);
for (desNo = 0; desNo < tx_ring->count; desNo++) {
tx_desc = PCH_GBE_TX_DESC(*tx_ring, desNo);
@@ -2135,15 +2134,9 @@ static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
- unsigned long flags;
- if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
- /* Collision - tell upper layer to requeue */
- return NETDEV_TX_LOCKED;
- }
if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
netif_stop_queue(netdev);
- spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
netdev_dbg(netdev,
"Return : BUSY next_to use : 0x%08x next_to clean : 0x%08x\n",
tx_ring->next_to_use, tx_ring->next_to_clean);
@@ -2152,7 +2145,6 @@ static int pch_gbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
/* CRC,ITAG no support */
pch_gbe_tx_queue(adapter, tx_ring, skb);
- spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index 13d88a6025c8..91be2f02ef1c 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -1144,7 +1144,7 @@ static void hamachi_tx_timeout(struct net_device *dev)
hmp->rx_ring[RX_RING_SIZE-1].status_n_length |= cpu_to_le32(DescEndRing);
/* Trigger an immediate transmit demand. */
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
dev->stats.tx_errors++;
/* Restart the chip's Tx/Rx processes . */
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index fa2db41e02f8..fb1d1031b091 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -714,7 +714,7 @@ static void yellowfin_tx_timeout(struct net_device *dev)
if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE)
netif_wake_queue (dev); /* Typical path */
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
dev->stats.tx_errors++;
}
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index af54df52aa6b..2f4a837f0d6a 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -989,7 +989,7 @@ static void pasemi_adjust_link(struct net_device *dev)
unsigned int flags;
unsigned int new_flags;
- if (!mac->phydev->link) {
+ if (!dev->phydev->link) {
/* If no link, MAC speed settings don't matter. Just report
* link down and return.
*/
@@ -1010,10 +1010,10 @@ static void pasemi_adjust_link(struct net_device *dev)
new_flags = flags & ~(PAS_MAC_CFG_PCFG_HD | PAS_MAC_CFG_PCFG_SPD_M |
PAS_MAC_CFG_PCFG_TSR_M);
- if (!mac->phydev->duplex)
+ if (!dev->phydev->duplex)
new_flags |= PAS_MAC_CFG_PCFG_HD;
- switch (mac->phydev->speed) {
+ switch (dev->phydev->speed) {
case 1000:
new_flags |= PAS_MAC_CFG_PCFG_SPD_1G |
PAS_MAC_CFG_PCFG_TSR_1G;
@@ -1027,15 +1027,15 @@ static void pasemi_adjust_link(struct net_device *dev)
PAS_MAC_CFG_PCFG_TSR_10M;
break;
default:
- printk("Unsupported speed %d\n", mac->phydev->speed);
+ printk("Unsupported speed %d\n", dev->phydev->speed);
}
/* Print on link or speed/duplex change */
- msg = mac->link != mac->phydev->link || flags != new_flags;
+ msg = mac->link != dev->phydev->link || flags != new_flags;
- mac->duplex = mac->phydev->duplex;
- mac->speed = mac->phydev->speed;
- mac->link = mac->phydev->link;
+ mac->duplex = dev->phydev->duplex;
+ mac->speed = dev->phydev->speed;
+ mac->link = dev->phydev->link;
if (new_flags != flags)
write_mac_reg(mac, PAS_MAC_CFG_PCFG, new_flags);
@@ -1067,8 +1067,6 @@ static int pasemi_mac_phy_init(struct net_device *dev)
return -ENODEV;
}
- mac->phydev = phydev;
-
return 0;
}
@@ -1198,8 +1196,8 @@ static int pasemi_mac_open(struct net_device *dev)
goto out_rx_int;
}
- if (mac->phydev)
- phy_start(mac->phydev);
+ if (dev->phydev)
+ phy_start(dev->phydev);
setup_timer(&mac->tx->clean_timer, pasemi_mac_tx_timer,
(unsigned long)mac->tx);
@@ -1293,9 +1291,9 @@ static int pasemi_mac_close(struct net_device *dev)
rxch = rx_ring(mac)->chan.chno;
txch = tx_ring(mac)->chan.chno;
- if (mac->phydev) {
- phy_stop(mac->phydev);
- phy_disconnect(mac->phydev);
+ if (dev->phydev) {
+ phy_stop(dev->phydev);
+ phy_disconnect(dev->phydev);
}
del_timer_sync(&mac->tx->clean_timer);
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.h b/drivers/net/ethernet/pasemi/pasemi_mac.h
index 161c99a98403..7c47e263b8c1 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.h
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.h
@@ -70,7 +70,6 @@ struct pasemi_mac {
struct pci_dev *pdev;
struct pci_dev *dma_pdev;
struct pci_dev *iob_pdev;
- struct phy_device *phydev;
struct napi_struct napi;
int bufsz; /* RX ring buffer size */
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c b/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c
index f046bfc18e7d..d0afc2b8f8e3 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac_ethtool.c
@@ -62,32 +62,6 @@ static struct {
{ "tx-1024-1518-byte-packets" },
};
-static int
-pasemi_mac_ethtool_get_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
-{
- struct pasemi_mac *mac = netdev_priv(netdev);
- struct phy_device *phydev = mac->phydev;
-
- if (!phydev)
- return -EOPNOTSUPP;
-
- return phy_ethtool_gset(phydev, cmd);
-}
-
-static int
-pasemi_mac_ethtool_set_settings(struct net_device *netdev,
- struct ethtool_cmd *cmd)
-{
- struct pasemi_mac *mac = netdev_priv(netdev);
- struct phy_device *phydev = mac->phydev;
-
- if (!phydev)
- return -EOPNOTSUPP;
-
- return phy_ethtool_sset(phydev, cmd);
-}
-
static u32
pasemi_mac_ethtool_get_msglevel(struct net_device *netdev)
{
@@ -145,8 +119,6 @@ static void pasemi_mac_get_strings(struct net_device *netdev, u32 stringset,
}
const struct ethtool_ops pasemi_mac_ethtool_ops = {
- .get_settings = pasemi_mac_ethtool_get_settings,
- .set_settings = pasemi_mac_ethtool_set_settings,
.get_msglevel = pasemi_mac_ethtool_get_msglevel,
.set_msglevel = pasemi_mac_ethtool_set_msglevel,
.get_link = ethtool_op_get_link,
@@ -154,5 +126,7 @@ const struct ethtool_ops pasemi_mac_ethtool_ops = {
.get_strings = pasemi_mac_get_strings,
.get_sset_count = pasemi_mac_get_sset_count,
.get_ethtool_stats = pasemi_mac_get_ethtool_stats,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index ddcfcab034c2..6ba48406899e 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -54,16 +54,6 @@ config QLCNIC_DCB
mode of DCB is supported. PG and PFC values are related only
to Tx.
-config QLCNIC_VXLAN
- bool "Virtual eXtensible Local Area Network (VXLAN) offload support"
- default n
- depends on QLCNIC && VXLAN && !(QLCNIC=y && VXLAN=m)
- ---help---
- This enables hardware offload support for VXLAN protocol over QLogic's
- 84XX series adapters.
- Say Y here if you want to enable hardware offload support for
- Virtual eXtensible Local Area Network (VXLAN) in the driver.
-
config QLCNIC_HWMON
bool "QLOGIC QLCNIC 82XX and 83XX family HWMON support"
depends on QLCNIC && HWMON && !(QLCNIC=y && HWMON=m)
@@ -98,9 +88,20 @@ config QED
---help---
This enables the support for ...
+config QED_SRIOV
+ bool "QLogic QED 25/40/100Gb SR-IOV support"
+ depends on QED && PCI_IOV
+ default y
+ ---help---
+ This configuration parameter enables Single Root Input Output
+ Virtualization support for QED devices.
+ This allows for virtual function acceleration in virtualized
+ environments.
+
config QEDE
tristate "QLogic QED 25/40/100Gb Ethernet NIC"
depends on QED
---help---
This enables the support for ...
+
endif # NET_VENDOR_QLOGIC
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 9c6eed9b45f7..7a0281a36c28 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -2286,7 +2286,7 @@ static void netxen_tx_timeout_task(struct work_struct *work)
goto request_reset;
}
}
- adapter->netdev->trans_start = jiffies;
+ netif_trans_update(adapter->netdev);
rtnl_unlock();
return;
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index 5c2fd57236fe..d1f157e439cf 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -1,4 +1,6 @@
obj-$(CONFIG_QED) := qed.o
qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
- qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o
+ qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \
+ qed_selftest.o qed_dcbx.o
+qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index fcb8e9ba51d9..45ab74676573 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -26,12 +26,14 @@
#include "qed_hsi.h"
extern const struct qed_common_ops qed_common_ops_pass;
-#define DRV_MODULE_VERSION "8.7.0.0"
+#define DRV_MODULE_VERSION "8.7.1.20"
#define MAX_HWFNS_PER_DEVICE (4)
#define NAME_SIZE 16
#define VER_SIZE 16
+#define QED_WFQ_UNIT 100
+
/* cau states */
enum qed_coalescing_mode {
QED_COAL_MODE_DISABLE,
@@ -74,12 +76,59 @@ struct qed_rt_data {
bool *b_valid;
};
+enum qed_tunn_mode {
+ QED_MODE_L2GENEVE_TUNN,
+ QED_MODE_IPGENEVE_TUNN,
+ QED_MODE_L2GRE_TUNN,
+ QED_MODE_IPGRE_TUNN,
+ QED_MODE_VXLAN_TUNN,
+};
+
+enum qed_tunn_clss {
+ QED_TUNN_CLSS_MAC_VLAN,
+ QED_TUNN_CLSS_MAC_VNI,
+ QED_TUNN_CLSS_INNER_MAC_VLAN,
+ QED_TUNN_CLSS_INNER_MAC_VNI,
+ MAX_QED_TUNN_CLSS,
+};
+
+struct qed_tunn_start_params {
+ unsigned long tunn_mode;
+ u16 vxlan_udp_port;
+ u16 geneve_udp_port;
+ u8 update_vxlan_udp_port;
+ u8 update_geneve_udp_port;
+ u8 tunn_clss_vxlan;
+ u8 tunn_clss_l2geneve;
+ u8 tunn_clss_ipgeneve;
+ u8 tunn_clss_l2gre;
+ u8 tunn_clss_ipgre;
+};
+
+struct qed_tunn_update_params {
+ unsigned long tunn_mode_update_mask;
+ unsigned long tunn_mode;
+ u16 vxlan_udp_port;
+ u16 geneve_udp_port;
+ u8 update_rx_pf_clss;
+ u8 update_tx_pf_clss;
+ u8 update_vxlan_udp_port;
+ u8 update_geneve_udp_port;
+ u8 tunn_clss_vxlan;
+ u8 tunn_clss_l2geneve;
+ u8 tunn_clss_ipgeneve;
+ u8 tunn_clss_l2gre;
+ u8 tunn_clss_ipgre;
+};
+
/* The PCI personality is not quite synonymous to protocol ID:
* 1. All personalities need CORE connections
* 2. The Ethernet personality may support also the RoCE protocol
*/
enum qed_pci_personality {
QED_PCI_ETH,
+ QED_PCI_ISCSI,
+ QED_PCI_ETH_ROCE,
QED_PCI_DEFAULT /* default in shmem */
};
@@ -105,6 +154,7 @@ enum QED_RESOURCES {
enum QED_FEATURE {
QED_PF_L2_QUE,
+ QED_VF,
QED_MAX_FEATURES,
};
@@ -122,6 +172,8 @@ enum QED_PORT_MODE {
enum qed_dev_cap {
QED_DEV_CAP_ETH,
+ QED_DEV_CAP_ISCSI,
+ QED_DEV_CAP_ROCE,
};
struct qed_hw_info {
@@ -135,6 +187,8 @@ struct qed_hw_info {
#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
+#define RESC_END(_p_hwfn, resc) (RESC_START(_p_hwfn, resc) + \
+ RESC_NUM(_p_hwfn, resc))
#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])
u8 num_tc;
@@ -192,6 +246,12 @@ struct qed_dmae_info {
struct dmae_cmd *p_dmae_cmd;
};
+struct qed_wfq_data {
+ /* when feature is configured for at least 1 vport */
+ u32 min_speed;
+ bool configured;
+};
+
struct qed_qm_info {
struct init_qm_pq_params *qm_pq_params;
struct init_qm_vport_params *qm_vport_params;
@@ -201,6 +261,7 @@ struct qed_qm_info {
u8 pure_lb_pq;
u8 offload_pq;
u8 pure_ack_pq;
+ u8 ooo_pq;
u8 vf_queues_offset;
u16 num_pqs;
u16 num_vf_pqs;
@@ -212,6 +273,8 @@ struct qed_qm_info {
bool vport_wfq_en;
u8 pf_wfq;
u32 pf_rl;
+ struct qed_wfq_data *wfq_data;
+ u8 num_pf_rls;
};
struct storm_stats {
@@ -256,6 +319,9 @@ struct qed_hwfn {
bool first_on_engine;
bool hw_init_done;
+ u8 num_funcs_on_engine;
+ u8 enabled_func_idx;
+
/* BAR access */
void __iomem *regview;
void __iomem *doorbells;
@@ -293,6 +359,9 @@ struct qed_hwfn {
/* Protocol related */
struct qed_pf_params pf_params;
+ bool b_rdma_enabled_in_prs;
+ u32 rdma_prs_search_reg;
+
/* Array of sb_info of all status blocks */
struct qed_sb_info *sbs_info[MAX_SB_PER_PF_MIMD];
u16 num_sbs;
@@ -306,8 +375,12 @@ struct qed_hwfn {
/* True if the driver requests for the link */
bool b_drv_link_init;
+ struct qed_vf_iov *vf_iov_info;
+ struct qed_pf_iov *pf_iov_info;
struct qed_mcp_info *mcp_info;
+ struct qed_dcbx_info *p_dcbx_info;
+
struct qed_hw_cid_data *p_tx_cids;
struct qed_hw_cid_data *p_rx_cids;
@@ -322,6 +395,12 @@ struct qed_hwfn {
struct qed_simd_fp_handler simd_proto_handler[64];
+#ifdef CONFIG_QED_SRIOV
+ struct workqueue_struct *iov_wq;
+ struct delayed_work iov_task;
+ unsigned long iov_task_flags;
+#endif
+
struct z_stream_s *stream;
};
@@ -410,8 +489,8 @@ struct qed_dev {
u32 int_mode;
enum qed_coalescing_mode int_coalescing_mode;
- u8 rx_coalesce_usecs;
- u8 tx_coalesce_usecs;
+ u16 rx_coalesce_usecs;
+ u16 tx_coalesce_usecs;
/* Start Bar offset of first hwfn */
void __iomem *regview;
@@ -430,6 +509,13 @@ struct qed_dev {
u8 num_hwfns;
struct qed_hwfn hwfns[MAX_HWFNS_PER_DEVICE];
+ /* SRIOV */
+ struct qed_hw_sriov_info *p_iov_info;
+#define IS_QED_SRIOV(cdev) (!!(cdev)->p_iov_info)
+
+ unsigned long tunn_mode;
+
+ bool b_is_vf;
u32 drv_type;
struct qed_eth_stats *reset_stats;
@@ -459,6 +545,8 @@ struct qed_dev {
const struct firmware *firmware;
};
+#define NUM_OF_VFS(dev) MAX_NUM_VFS_BB
+#define NUM_OF_L2_QUEUES(dev) MAX_NUM_L2_QUEUES_BB
#define NUM_OF_SBS(dev) MAX_SB_PER_PATH_BB
#define NUM_OF_ENG_PFS(dev) MAX_NUM_PFS_BB
@@ -473,13 +561,27 @@ struct qed_dev {
static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
u32 concrete_fid)
{
+ u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID);
u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
+ u8 vf_valid = GET_FIELD(concrete_fid,
+ PXP_CONCRETE_FID_VFVALID);
+ u8 sw_fid;
+
+ if (vf_valid)
+ sw_fid = vfid + MAX_NUM_PFS;
+ else
+ sw_fid = pfid;
- return pfid;
+ return sw_fid;
}
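The helper above folds PFs and VFs into a single software FID space: a PF keeps its pfid, while a VF is placed after all PFs. A minimal, self-contained C model of the same mapping (the field offsets and MAX_NUM_PFS value below are illustrative assumptions, not the real PXP register layout):

#include <stdint.h>
#include <stdio.h>

/* assumed field layout, for illustration only */
#define FID_PFID_SHIFT		0
#define FID_PFID_MASK		0xf
#define FID_VFID_SHIFT		4
#define FID_VFID_MASK		0xff
#define FID_VFVALID_SHIFT	12
#define FID_VFVALID_MASK	0x1
#define MAX_NUM_PFS		16

static uint8_t concrete_to_sw_fid(uint32_t fid)
{
	uint8_t pfid = (fid >> FID_PFID_SHIFT) & FID_PFID_MASK;
	uint8_t vfid = (fid >> FID_VFID_SHIFT) & FID_VFID_MASK;
	uint8_t vf_valid = (fid >> FID_VFVALID_SHIFT) & FID_VFVALID_MASK;

	/* VFs land in a flat ID space after all PFs */
	return vf_valid ? vfid + MAX_NUM_PFS : pfid;
}

int main(void)
{
	/* vf_valid=1, vfid=3, pfid=2 -> sw_fid = 3 + 16 = 19 */
	printf("%u\n", concrete_to_sw_fid((1u << 12) | (3u << 4) | 2u));
	return 0;
}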
#define PURE_LB_TC 8
+#define OOO_LB_TC 9
+int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
+void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate);
+
+void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
#define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
/* Other Linux specific common definitions */
@@ -507,6 +609,4 @@ u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
-#define QED_ETH_INTERFACE_VERSION 300
-
#endif /* _QED_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index fc767c07a264..1c35f376143e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -24,11 +24,13 @@
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"
+#include "qed_sriov.h"
/* Max number of connection types in HW (DQ/CDU etc.) */
#define MAX_CONN_TYPES PROTOCOLID_COMMON
#define NUM_TASK_TYPES 2
#define NUM_TASK_PF_SEGMENTS 4
+#define NUM_TASK_VF_SEGMENTS 1
/* QM constants */
#define QM_PQ_ELEMENT_SIZE 4 /* in bytes */
@@ -37,6 +39,14 @@
#define DQ_RANGE_SHIFT 4
#define DQ_RANGE_ALIGN BIT(DQ_RANGE_SHIFT)
+/* Searcher constants */
+#define SRC_MIN_NUM_ELEMS 256
+
+/* Timers constants */
+#define TM_SHIFT 7
+#define TM_ALIGN BIT(TM_SHIFT)
+#define TM_ELEM_SIZE 4
+
/* ILT constants */
#define ILT_DEFAULT_HW_P_SIZE 3
#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
@@ -54,24 +64,71 @@
union conn_context {
struct core_conn_context core_ctx;
struct eth_conn_context eth_ctx;
+ struct iscsi_conn_context iscsi_ctx;
+ struct roce_conn_context roce_ctx;
+};
+
+/* TYPE-0 task context - iSCSI */
+union type0_task_context {
+ struct iscsi_task_context iscsi_ctx;
+};
+
+/* TYPE-1 task context - ROCE */
+union type1_task_context {
+ struct rdma_task_context roce_ctx;
+};
+
+struct src_ent {
+ u8 opaque[56];
+ u64 next;
};
+#define CDUT_SEG_ALIGNMET 3 /* in 4k chunks */
+#define CDUT_SEG_ALIGNMET_IN_BYTES (1 << (CDUT_SEG_ALIGNMET + 12))
+
#define CONN_CXT_SIZE(p_hwfn) \
ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
+#define SRQ_CXT_SIZE (sizeof(struct rdma_srq_context))
+
+#define TYPE0_TASK_CXT_SIZE(p_hwfn) \
+ ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn)
+
+/* Alignment is inherent to the type1_task_context structure */
+#define TYPE1_TASK_CXT_SIZE(p_hwfn) sizeof(union type1_task_context)
+
/* PF per protocol configuration object */
+#define TASK_SEGMENTS (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
+#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)
+
+struct qed_tid_seg {
+ u32 count;
+ u8 type;
+ bool has_fl_mem;
+};
+
struct qed_conn_type_cfg {
u32 cid_count;
u32 cid_start;
+ u32 cids_per_vf;
+ struct qed_tid_seg tid_seg[TASK_SEGMENTS];
};
/* ILT Client configuration, Per connection type (protocol) resources. */
#define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2)
+#define ILT_CLI_VF_BLOCKS (1 + NUM_TASK_VF_SEGMENTS * 2)
#define CDUC_BLK (0)
+#define SRQ_BLK (0)
+#define CDUT_SEG_BLK(n) (1 + (u8)(n))
+#define CDUT_FL_SEG_BLK(n, X) (1 + (n) + NUM_TASK_ ## X ## _SEGMENTS)
enum ilt_clients {
ILT_CLI_CDUC,
+ ILT_CLI_CDUT,
ILT_CLI_QM,
+ ILT_CLI_TM,
+ ILT_CLI_SRC,
+ ILT_CLI_TSDM,
ILT_CLI_MAX
};
@@ -84,6 +141,7 @@ struct qed_ilt_cli_blk {
u32 total_size; /* 0 means not active */
u32 real_size_in_page;
u32 start_line;
+ u32 dynamic_line_cnt;
};
struct qed_ilt_client_cfg {
@@ -97,6 +155,10 @@ struct qed_ilt_client_cfg {
/* ILT client blocks for PF */
struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
u32 pf_total_lines;
+
+ /* ILT client blocks for VFs */
+ struct qed_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
+ u32 vf_total_lines;
};
/* Per Path -
@@ -123,45 +185,237 @@ struct qed_cxt_mngr {
/* computed ILT structure */
struct qed_ilt_client_cfg clients[ILT_CLI_MAX];
+ /* Task type sizes */
+ u32 task_type_size[NUM_TASK_TYPES];
+
+ /* total number of VFs for this hwfn -
+ * ALL VFs are symmetric in terms of HW resources
+ */
+ u32 vf_count;
+
+ /* total number of SRQs for this hwfn */
+ u32 srq_count;
+
/* Acquired CIDs */
struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];
/* ILT shadow table */
struct qed_dma_mem *ilt_shadow;
u32 pf_start_line;
+
+ /* Mutex for a dynamic ILT allocation */
+ struct mutex mutex;
+
+ /* SRC T2 */
+ struct qed_dma_mem *t2;
+ u32 t2_num_pages;
+ u64 first_free;
+ u64 last_free;
};
+static bool src_proto(enum protocol_type type)
+{
+ return type == PROTOCOLID_ISCSI ||
+ type == PROTOCOLID_ROCE;
+}
-static u32 qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr)
+static bool tm_cid_proto(enum protocol_type type)
{
- u32 type, pf_cids = 0;
+ return type == PROTOCOLID_ISCSI ||
+ type == PROTOCOLID_ROCE;
+}
- for (type = 0; type < MAX_CONN_TYPES; type++)
- pf_cids += p_mngr->conn_cfg[type].cid_count;
+/* counts the iids for the CDU/CDUC ILT client configuration */
+struct qed_cdu_iids {
+ u32 pf_cids;
+ u32 per_vf_cids;
+};
- return pf_cids;
+static void qed_cxt_cdu_iids(struct qed_cxt_mngr *p_mngr,
+ struct qed_cdu_iids *iids)
+{
+ u32 type;
+
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
+ iids->pf_cids += p_mngr->conn_cfg[type].cid_count;
+ iids->per_vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
+ }
+}
+
+/* counts the iids for the Searcher block configuration */
+struct qed_src_iids {
+ u32 pf_cids;
+ u32 per_vf_cids;
+};
+
+static void qed_cxt_src_iids(struct qed_cxt_mngr *p_mngr,
+ struct qed_src_iids *iids)
+{
+ u32 i;
+
+ for (i = 0; i < MAX_CONN_TYPES; i++) {
+ if (!src_proto(i))
+ continue;
+
+ iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
+ iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf;
+ }
+}
+
+/* counts the iids for the Timers block configuration */
+struct qed_tm_iids {
+ u32 pf_cids;
+ u32 pf_tids[NUM_TASK_PF_SEGMENTS]; /* per segment */
+ u32 pf_tids_total;
+ u32 per_vf_cids;
+ u32 per_vf_tids;
+};
+
+static void qed_cxt_tm_iids(struct qed_cxt_mngr *p_mngr,
+ struct qed_tm_iids *iids)
+{
+ u32 i, j;
+
+ for (i = 0; i < MAX_CONN_TYPES; i++) {
+ struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i];
+
+ if (tm_cid_proto(i)) {
+ iids->pf_cids += p_cfg->cid_count;
+ iids->per_vf_cids += p_cfg->cids_per_vf;
+ }
+ }
+
+ iids->pf_cids = roundup(iids->pf_cids, TM_ALIGN);
+ iids->per_vf_cids = roundup(iids->per_vf_cids, TM_ALIGN);
+ iids->per_vf_tids = roundup(iids->per_vf_tids, TM_ALIGN);
+
+ for (iids->pf_tids_total = 0, j = 0; j < NUM_TASK_PF_SEGMENTS; j++) {
+ iids->pf_tids[j] = roundup(iids->pf_tids[j], TM_ALIGN);
+ iids->pf_tids_total += iids->pf_tids[j];
+ }
}
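Every count above is padded to the Timers block granularity, TM_ALIGN = 1 << TM_SHIFT = 128. A one-function sketch of the roundup used here:

/* kernel-style roundup() for the TM_ALIGN case; tm_roundup(300) == 384 */
static unsigned int tm_roundup(unsigned int x)
{
	const unsigned int align = 1u << 7;	/* TM_SHIFT = 7, TM_ALIGN = 128 */

	return ((x + align - 1) / align) * align;
}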
static void qed_cxt_qm_iids(struct qed_hwfn *p_hwfn,
struct qed_qm_iids *iids)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
- int type;
+ struct qed_tid_seg *segs;
+ u32 vf_cids = 0, type, j;
+ u32 vf_tids = 0;
- for (type = 0; type < MAX_CONN_TYPES; type++)
+ for (type = 0; type < MAX_CONN_TYPES; type++) {
iids->cids += p_mngr->conn_cfg[type].cid_count;
+ vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
+
+ segs = p_mngr->conn_cfg[type].tid_seg;
+ /* for each segment there is at most one
+ * protocol for which count is not 0.
+ */
+ for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
+ iids->tids += segs[j].count;
+
+ * The last array element is for the VFs. As for PF
+ * segments there can be only one protocol for
+ * which this value is not 0.
+ */
+ vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
+ }
+
+ iids->vf_cids += vf_cids * p_mngr->vf_count;
+ iids->tids += vf_tids * p_mngr->vf_count;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_ILT,
+ "iids: CIDS %08x vf_cids %08x tids %08x vf_tids %08x\n",
+ iids->cids, iids->vf_cids, iids->tids, vf_tids);
+}
+
+static struct qed_tid_seg *qed_cxt_tid_seg_info(struct qed_hwfn *p_hwfn,
+ u32 seg)
+{
+ struct qed_cxt_mngr *p_cfg = p_hwfn->p_cxt_mngr;
+ u32 i;
- DP_VERBOSE(p_hwfn, QED_MSG_ILT, "iids: CIDS %08x\n", iids->cids);
+ /* Find the protocol with tid count > 0 for this segment.
+ * Note: there can only be one and this is already validated.
+ */
+ for (i = 0; i < MAX_CONN_TYPES; i++)
+ if (p_cfg->conn_cfg[i].tid_seg[seg].count)
+ return &p_cfg->conn_cfg[i].tid_seg[seg];
+ return NULL;
+}
+
+void qed_cxt_set_srq_count(struct qed_hwfn *p_hwfn, u32 num_srqs)
+{
+ struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
+
+ p_mgr->srq_count = num_srqs;
+}
+
+u32 qed_cxt_get_srq_count(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
+
+ return p_mgr->srq_count;
}
/* set the iids count per protocol */
static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
enum protocol_type type,
- u32 cid_count)
+ u32 cid_count, u32 vf_cid_cnt)
{
struct qed_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
struct qed_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];
p_conn->cid_count = roundup(cid_count, DQ_RANGE_ALIGN);
+ p_conn->cids_per_vf = roundup(vf_cid_cnt, DQ_RANGE_ALIGN);
+
+ if (type == PROTOCOLID_ROCE) {
+ u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val;
+ u32 cxt_size = CONN_CXT_SIZE(p_hwfn);
+ u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+
+ p_conn->cid_count = roundup(p_conn->cid_count, elems_per_page);
+ }
+}
+
+u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
+ enum protocol_type type,
+ u32 *vf_cid)
+{
+ if (vf_cid)
+ *vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;
+
+ return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
+}
+
+u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn,
+ enum protocol_type type)
+{
+ return p_hwfn->p_cxt_mngr->acquired[type].start_cid;
+}
+
+u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
+ enum protocol_type type)
+{
+ u32 cnt = 0;
+ int i;
+
+ for (i = 0; i < TASK_SEGMENTS; i++)
+ cnt += p_hwfn->p_cxt_mngr->conn_cfg[type].tid_seg[i].count;
+
+ return cnt;
+}
+
+static void
+qed_cxt_set_proto_tid_count(struct qed_hwfn *p_hwfn,
+ enum protocol_type proto,
+ u8 seg, u8 seg_type, u32 count, bool has_fl)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg];
+
+ p_seg->count = count;
+ p_seg->has_fl_mem = has_fl;
+ p_seg->type = seg_type;
}
static void qed_ilt_cli_blk_fill(struct qed_ilt_client_cfg *p_cli,
@@ -205,15 +459,42 @@ static void qed_ilt_cli_adv_line(struct qed_hwfn *p_hwfn,
p_blk->real_size_in_page, p_blk->start_line);
}
+static u32 qed_ilt_get_dynamic_line_cnt(struct qed_hwfn *p_hwfn,
+ enum ilt_clients ilt_client)
+{
+ u32 cid_count = p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE].cid_count;
+ struct qed_ilt_client_cfg *p_cli;
+ u32 lines_to_skip = 0;
+ u32 cxts_per_p;
+
+ if (ilt_client == ILT_CLI_CDUC) {
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+
+ cxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) /
+ (u32) CONN_CXT_SIZE(p_hwfn);
+
+ lines_to_skip = cid_count / cxts_per_p;
+ }
+
+ return lines_to_skip;
+}
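Worked example of the division above, under assumed sizes: with the default 32K ILT page (1 << (3 + 12) bytes for ILT_DEFAULT_HW_P_SIZE = 3) and a hypothetical 320-byte connection context, cxts_per_p = 32768 / 320 = 102; 4096 RoCE cids would then leave lines_to_skip = 4096 / 102 = 40 CDUC lines to be populated later by the dynamic ILT allocation.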
+
int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 curr_line, total, i, task_size, line;
struct qed_ilt_client_cfg *p_cli;
struct qed_ilt_cli_blk *p_blk;
- u32 curr_line, total, pf_cids;
+ struct qed_cdu_iids cdu_iids;
+ struct qed_src_iids src_iids;
struct qed_qm_iids qm_iids;
+ struct qed_tm_iids tm_iids;
+ struct qed_tid_seg *p_seg;
memset(&qm_iids, 0, sizeof(qm_iids));
+ memset(&cdu_iids, 0, sizeof(cdu_iids));
+ memset(&src_iids, 0, sizeof(src_iids));
+ memset(&tm_iids, 0, sizeof(tm_iids));
p_mngr->pf_start_line = RESC_START(p_hwfn, QED_ILT);
@@ -224,14 +505,16 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
/* CDUC */
p_cli = &p_mngr->clients[ILT_CLI_CDUC];
curr_line = p_mngr->pf_start_line;
+
+ /* CDUC PF */
p_cli->pf_total_lines = 0;
/* get the counters for the CDUC and QM clients */
- pf_cids = qed_cxt_cdu_iids(p_mngr);
+ qed_cxt_cdu_iids(p_mngr, &cdu_iids);
p_blk = &p_cli->pf_blks[CDUC_BLK];
- total = pf_cids * CONN_CXT_SIZE(p_hwfn);
+ total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);
qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
total, CONN_CXT_SIZE(p_hwfn));
@@ -239,17 +522,146 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
p_cli->pf_total_lines = curr_line - p_blk->start_line;
+ p_blk->dynamic_line_cnt = qed_ilt_get_dynamic_line_cnt(p_hwfn,
+ ILT_CLI_CDUC);
+
+ /* CDUC VF */
+ p_blk = &p_cli->vf_blks[CDUC_BLK];
+ total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);
+
+ qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total, CONN_CXT_SIZE(p_hwfn));
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
+ p_cli->vf_total_lines = curr_line - p_blk->start_line;
+
+ for (i = 1; i < p_mngr->vf_count; i++)
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUC);
+
+ /* CDUT PF */
+ p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+ p_cli->first.val = curr_line;
+
+ /* first the 'working' task memory */
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
+ if (!p_seg || p_seg->count == 0)
+ continue;
+
+ p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)];
+ total = p_seg->count * p_mngr->task_type_size[p_seg->type];
+ qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total,
+ p_mngr->task_type_size[p_seg->type]);
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+ }
+
+ /* next the 'init' task memory (forced load memory) */
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
+ if (!p_seg || p_seg->count == 0)
+ continue;
+
+ p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)];
+
+ if (!p_seg->has_fl_mem) {
+ /* The segment is active (total size of 'working'
+ * memory is > 0) but has no FL (forced-load, Init)
+ * memory. Thus:
+ *
+ * 1. The total-size in the corresponding FL block of
+ * the ILT client is set to 0 - no ILT lines are
+ * provisioned and no ILT memory allocated.
+ *
+ * 2. The start-line of said block is set to the
+ * start line of the matching working memory
+ * block in the ILT client. This is later used to
+ * configure the CDU segment offset registers and
+ * results in FL commands for TIDs of this
+ * segment behaving as regular load commands
+ * (loading TIDs from the working memory).
+ */
+ line = p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line;
+
+ qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
+ continue;
+ }
+ total = p_seg->count * p_mngr->task_type_size[p_seg->type];
+
+ qed_ilt_cli_blk_fill(p_cli, p_blk,
+ curr_line, total,
+ p_mngr->task_type_size[p_seg->type]);
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+ }
+ p_cli->pf_total_lines = curr_line - p_cli->pf_blks[0].start_line;
+
+ /* CDUT VF */
+ p_seg = qed_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF);
+ if (p_seg && p_seg->count) {
+ /* Strictly speaking we need to iterate over all VF
+ * task segment types, but a VF has only 1 segment
+ */
+
+ /* 'working' memory */
+ total = p_seg->count * p_mngr->task_type_size[p_seg->type];
+
+ p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
+ qed_ilt_cli_blk_fill(p_cli, p_blk,
+ curr_line, total,
+ p_mngr->task_type_size[p_seg->type]);
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+
+ /* 'init' memory */
+ p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
+ if (!p_seg->has_fl_mem) {
+ /* see comment above */
+ line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line;
+ qed_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
+ } else {
+ task_size = p_mngr->task_type_size[p_seg->type];
+ qed_ilt_cli_blk_fill(p_cli, p_blk,
+ curr_line, total, task_size);
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+ }
+ p_cli->vf_total_lines = curr_line -
+ p_cli->vf_blks[0].start_line;
+
+ /* Now for the rest of the VFs */
+ for (i = 1; i < p_mngr->vf_count; i++) {
+ p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+
+ p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_CDUT);
+ }
+ }
+
/* QM */
p_cli = &p_mngr->clients[ILT_CLI_QM];
p_blk = &p_cli->pf_blks[0];
qed_cxt_qm_iids(p_hwfn, &qm_iids);
- total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids, 0, 0,
- p_hwfn->qm_info.num_pqs, 0);
-
- DP_VERBOSE(p_hwfn, QED_MSG_ILT,
- "QM ILT Info, (cids=%d, num_pqs=%d, memory_size=%d)\n",
- qm_iids.cids, p_hwfn->qm_info.num_pqs, total);
+ total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
+ qm_iids.vf_cids, qm_iids.tids,
+ p_hwfn->qm_info.num_pqs,
+ p_hwfn->qm_info.num_vf_pqs);
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_ILT,
+ "QM ILT Info, (cids=%d, vf_cids=%d, tids=%d, num_pqs=%d, num_vf_pqs=%d, memory_size=%d)\n",
+ qm_iids.cids,
+ qm_iids.vf_cids,
+ qm_iids.tids,
+ p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total);
qed_ilt_cli_blk_fill(p_cli, p_blk,
curr_line, total * 0x1000,
@@ -258,6 +670,75 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
p_cli->pf_total_lines = curr_line - p_blk->start_line;
+ /* SRC */
+ p_cli = &p_mngr->clients[ILT_CLI_SRC];
+ qed_cxt_src_iids(p_mngr, &src_iids);
+
+ /* Both the PF and VF searcher connections are stored in the per-PF
+ * database. Thus sum the PF searcher cids and all the VF searcher
+ * cids.
+ */
+ total = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
+ if (total) {
+ u32 local_max = max_t(u32, total,
+ SRC_MIN_NUM_ELEMS);
+
+ total = roundup_pow_of_two(local_max);
+
+ p_blk = &p_cli->pf_blks[0];
+ qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total * sizeof(struct src_ent),
+ sizeof(struct src_ent));
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_SRC);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+ }
+
+ /* TM PF */
+ p_cli = &p_mngr->clients[ILT_CLI_TM];
+ qed_cxt_tm_iids(p_mngr, &tm_iids);
+ total = tm_iids.pf_cids + tm_iids.pf_tids_total;
+ if (total) {
+ p_blk = &p_cli->pf_blks[0];
+ qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total * TM_ELEM_SIZE, TM_ELEM_SIZE);
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_TM);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+ }
+
+ /* TM VF */
+ total = tm_iids.per_vf_cids + tm_iids.per_vf_tids;
+ if (total) {
+ p_blk = &p_cli->vf_blks[0];
+ qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total * TM_ELEM_SIZE, TM_ELEM_SIZE);
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_TM);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+
+ for (i = 1; i < p_mngr->vf_count; i++)
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_TM);
+ }
+
+ /* TSDM (SRQ CONTEXT) */
+ total = qed_cxt_get_srq_count(p_hwfn);
+
+ if (total) {
+ p_cli = &p_mngr->clients[ILT_CLI_TSDM];
+ p_blk = &p_cli->pf_blks[SRQ_BLK];
+ qed_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
+ total * SRQ_CXT_SIZE, SRQ_CXT_SIZE);
+
+ qed_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
+ ILT_CLI_TSDM);
+ p_cli->pf_total_lines = curr_line - p_blk->start_line;
+ }
+
if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
RESC_NUM(p_hwfn, QED_ILT)) {
DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n",
@@ -268,8 +749,122 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn)
return 0;
}
+static void qed_cxt_src_t2_free(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 i;
+
+ if (!p_mngr->t2)
+ return;
+
+ for (i = 0; i < p_mngr->t2_num_pages; i++)
+ if (p_mngr->t2[i].p_virt)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ p_mngr->t2[i].size,
+ p_mngr->t2[i].p_virt,
+ p_mngr->t2[i].p_phys);
+
+ kfree(p_mngr->t2);
+ p_mngr->t2 = NULL;
+}
+
+static int qed_cxt_src_t2_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 conn_num, total_size, ent_per_page, psz, i;
+ struct qed_ilt_client_cfg *p_src;
+ struct qed_src_iids src_iids;
+ struct qed_dma_mem *p_t2;
+ int rc;
+
+ memset(&src_iids, 0, sizeof(src_iids));
+
+ /* if the SRC ILT client is inactive - there are no connections
+ * requiring the searcher, leave.
+ */
+ p_src = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_SRC];
+ if (!p_src->active)
+ return 0;
+
+ qed_cxt_src_iids(p_mngr, &src_iids);
+ conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
+ total_size = conn_num * sizeof(struct src_ent);
+
+ /* use the same page size as the SRC ILT client */
+ psz = ILT_PAGE_IN_BYTES(p_src->p_size.val);
+ p_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz);
+
+ /* allocate t2 */
+ p_mngr->t2 = kzalloc(p_mngr->t2_num_pages * sizeof(struct qed_dma_mem),
+ GFP_KERNEL);
+ if (!p_mngr->t2) {
+ DP_NOTICE(p_hwfn, "Failed to allocate t2 table\n");
+ rc = -ENOMEM;
+ goto t2_fail;
+ }
+
+ /* allocate t2 pages */
+ for (i = 0; i < p_mngr->t2_num_pages; i++) {
+ u32 size = min_t(u32, total_size, psz);
+ void **p_virt = &p_mngr->t2[i].p_virt;
+
+ *p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ size,
+ &p_mngr->t2[i].p_phys, GFP_KERNEL);
+ if (!p_mngr->t2[i].p_virt) {
+ rc = -ENOMEM;
+ goto t2_fail;
+ }
+ memset(*p_virt, 0, size);
+ p_mngr->t2[i].size = size;
+ total_size -= size;
+ }
+
+ /* Set the t2 pointers */
+
+ /* entries per page - must be a power of two */
+ ent_per_page = psz / sizeof(struct src_ent);
+
+ p_mngr->first_free = (u64) p_mngr->t2[0].p_phys;
+
+ p_t2 = &p_mngr->t2[(conn_num - 1) / ent_per_page];
+ p_mngr->last_free = (u64) p_t2->p_phys +
+ ((conn_num - 1) & (ent_per_page - 1)) * sizeof(struct src_ent);
+
+ for (i = 0; i < p_mngr->t2_num_pages; i++) {
+ u32 ent_num = min_t(u32,
+ ent_per_page,
+ conn_num);
+ struct src_ent *entries = p_mngr->t2[i].p_virt;
+ u64 p_ent_phys = (u64) p_mngr->t2[i].p_phys, val;
+ u32 j;
+
+ for (j = 0; j < ent_num - 1; j++) {
+ val = p_ent_phys + (j + 1) * sizeof(struct src_ent);
+ entries[j].next = cpu_to_be64(val);
+ }
+
+ if (i < p_mngr->t2_num_pages - 1)
+ val = (u64) p_mngr->t2[i + 1].p_phys;
+ else
+ val = 0;
+ entries[j].next = cpu_to_be64(val);
+
+ conn_num -= ent_num;
+ }
+
+ return 0;
+
+t2_fail:
+ qed_cxt_src_t2_free(p_hwfn);
+ return rc;
+}
+
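The loop above threads every src_ent through its next pointer into one free list spanning all T2 pages: entries chain within a page, the last entry of each page points at the first entry of the following page, and the final entry is terminated with 0 (the real code additionally byte-swaps each pointer with cpu_to_be64). A host-memory-only sketch of that chaining, with hypothetical page and entry counts:

#include <stdint.h>

struct ent { uint8_t opaque[56]; uint64_t next; };

static void chain_pages(struct ent **pages, const uint64_t *phys,
			unsigned int npages, unsigned int per_page,
			unsigned int conn_num)
{
	unsigned int i, j, n;

	for (i = 0; i < npages; i++) {
		n = conn_num < per_page ? conn_num : per_page;

		/* in-page chain: each entry points at its successor */
		for (j = 0; j + 1 < n; j++)
			pages[i][j].next = phys[i] + (j + 1) * sizeof(struct ent);

		/* page boundary: hop to the next page, or terminate */
		pages[i][j].next = (i + 1 < npages) ? phys[i + 1] : 0;
		conn_num -= n;
	}
}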
#define for_each_ilt_valid_client(pos, clients) \
- for (pos = 0; pos < ILT_CLI_MAX; pos++)
+ for (pos = 0; pos < ILT_CLI_MAX; pos++) \
+ if (!clients[pos].active) { \
+ continue; \
+ } else \
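The rewritten iterator folds the active check into the macro itself; the if (...) { continue; } else shape is what lets a single trailing statement (or brace block) at the call site bind as the loop body without a dangling-else hazard. A standalone demonstration with a made-up client array:

#include <stdio.h>

struct cli { int active; int lines; };

#define for_each_valid(pos, arr, n)		\
	for (pos = 0; pos < (n); pos++)		\
		if (!(arr)[pos].active) {	\
			continue;		\
		} else

int main(void)
{
	struct cli c[3] = { { 1, 4 }, { 0, 9 }, { 1, 2 } };
	int i, total = 0;

	for_each_valid(i, c, 3)
		total += c[i].lines;	/* the inactive entry is skipped */

	printf("%d\n", total);		/* prints 6 */
	return 0;
}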
/* Total number of ILT lines used by this PF */
static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients)
@@ -277,12 +872,8 @@ static u32 qed_cxt_ilt_shadow_size(struct qed_ilt_client_cfg *ilt_clients)
u32 size = 0;
u32 i;
- for_each_ilt_valid_client(i, ilt_clients) {
- if (!ilt_clients[i].active)
- continue;
- size += (ilt_clients[i].last.val -
- ilt_clients[i].first.val + 1);
- }
+ for_each_ilt_valid_client(i, ilt_clients)
+ size += (ilt_clients[i].last.val - ilt_clients[i].first.val + 1);
return size;
}
@@ -313,15 +904,22 @@ static int qed_ilt_blk_alloc(struct qed_hwfn *p_hwfn,
u32 start_line_offset)
{
struct qed_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
- u32 lines, line, sz_left;
+ u32 lines, line, sz_left, lines_to_skip = 0;
+
+ /* Special handling for RoCE that supports dynamic allocation */
+ if ((p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) &&
+ ((ilt_client == ILT_CLI_CDUT) || ilt_client == ILT_CLI_TSDM))
+ return 0;
+
+ lines_to_skip = p_blk->dynamic_line_cnt;
if (!p_blk->total_size)
return 0;
sz_left = p_blk->total_size;
- lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page);
+ lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) - lines_to_skip;
line = p_blk->start_line + start_line_offset -
- p_hwfn->p_cxt_mngr->pf_start_line;
+ p_hwfn->p_cxt_mngr->pf_start_line + lines_to_skip;
for (; lines; lines--) {
dma_addr_t p_phys;
@@ -358,7 +956,7 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
struct qed_ilt_client_cfg *clients = p_mngr->clients;
struct qed_ilt_cli_blk *p_blk;
- u32 size, i, j;
+ u32 size, i, j, k;
int rc;
size = qed_cxt_ilt_shadow_size(clients);
@@ -375,14 +973,22 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
(u32)(size * sizeof(struct qed_dma_mem)));
for_each_ilt_valid_client(i, clients) {
- if (!clients[i].active)
- continue;
for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
p_blk = &clients[i].pf_blks[j];
rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
if (rc != 0)
goto ilt_shadow_fail;
}
+ for (k = 0; k < p_mngr->vf_count; k++) {
+ for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) {
+ u32 lines = clients[i].vf_total_lines * k;
+
+ p_blk = &clients[i].vf_blks[j];
+ rc = qed_ilt_blk_alloc(p_hwfn, p_blk, i, lines);
+ if (rc != 0)
+ goto ilt_shadow_fail;
+ }
+ }
}
return 0;
@@ -445,6 +1051,7 @@ cid_map_fail:
int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
{
+ struct qed_ilt_client_cfg *clients;
struct qed_cxt_mngr *p_mngr;
u32 i;
@@ -455,18 +1062,43 @@ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn)
}
/* Initialize ILT client registers */
- p_mngr->clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
- p_mngr->clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
- p_mngr->clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);
-
- p_mngr->clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
- p_mngr->clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
- p_mngr->clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);
-
+ clients = p_mngr->clients;
+ clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
+ clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
+ clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);
+
+ clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
+ clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
+ clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);
+
+ clients[ILT_CLI_TM].first.reg = ILT_CFG_REG(TM, FIRST_ILT);
+ clients[ILT_CLI_TM].last.reg = ILT_CFG_REG(TM, LAST_ILT);
+ clients[ILT_CLI_TM].p_size.reg = ILT_CFG_REG(TM, P_SIZE);
+
+ clients[ILT_CLI_SRC].first.reg = ILT_CFG_REG(SRC, FIRST_ILT);
+ clients[ILT_CLI_SRC].last.reg = ILT_CFG_REG(SRC, LAST_ILT);
+ clients[ILT_CLI_SRC].p_size.reg = ILT_CFG_REG(SRC, P_SIZE);
+
+ clients[ILT_CLI_CDUT].first.reg = ILT_CFG_REG(CDUT, FIRST_ILT);
+ clients[ILT_CLI_CDUT].last.reg = ILT_CFG_REG(CDUT, LAST_ILT);
+ clients[ILT_CLI_CDUT].p_size.reg = ILT_CFG_REG(CDUT, P_SIZE);
+
+ clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT);
+ clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
+ clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
/* default ILT page size for all clients is 32K */
for (i = 0; i < ILT_CLI_MAX; i++)
p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
+ /* Initialize task sizes */
+ p_mngr->task_type_size[0] = TYPE0_TASK_CXT_SIZE(p_hwfn);
+ p_mngr->task_type_size[1] = TYPE1_TASK_CXT_SIZE(p_hwfn);
+
+ if (p_hwfn->cdev->p_iov_info)
+ p_mngr->vf_count = p_hwfn->cdev->p_iov_info->total_vfs;
+ /* Initialize the dynamic ILT allocation mutex */
+ mutex_init(&p_mngr->mutex);
+
/* Set the cxt manager pointer prior to further allocations */
p_hwfn->p_cxt_mngr = p_mngr;
@@ -484,6 +1116,13 @@ int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn)
goto tables_alloc_fail;
}
+ /* Allocate the T2 table */
+ rc = qed_cxt_src_t2_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to allocate T2 memory\n");
+ goto tables_alloc_fail;
+ }
+
/* Allocate and initialize the acquired cids bitmaps */
rc = qed_cid_map_alloc(p_hwfn);
if (rc) {
@@ -504,6 +1143,7 @@ void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn)
return;
qed_cid_map_free(p_hwfn);
+ qed_cxt_src_t2_free(p_hwfn);
qed_ilt_shadow_free(p_hwfn);
kfree(p_hwfn->p_cxt_mngr);
@@ -548,6 +1188,48 @@ void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
#define CDUC_NCIB_MASK \
(CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)
+#define CDUT_TYPE0_CXT_SIZE_SHIFT \
+ CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT
+
+#define CDUT_TYPE0_CXT_SIZE_MASK \
+ (CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE >> \
+ CDUT_TYPE0_CXT_SIZE_SHIFT)
+
+#define CDUT_TYPE0_BLOCK_WASTE_SHIFT \
+ CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT
+
+#define CDUT_TYPE0_BLOCK_WASTE_MASK \
+ (CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE >> \
+ CDUT_TYPE0_BLOCK_WASTE_SHIFT)
+
+#define CDUT_TYPE0_NCIB_SHIFT \
+ CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT
+
+#define CDUT_TYPE0_NCIB_MASK \
+ (CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK >> \
+ CDUT_TYPE0_NCIB_SHIFT)
+
+#define CDUT_TYPE1_CXT_SIZE_SHIFT \
+ CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT
+
+#define CDUT_TYPE1_CXT_SIZE_MASK \
+ (CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE >> \
+ CDUT_TYPE1_CXT_SIZE_SHIFT)
+
+#define CDUT_TYPE1_BLOCK_WASTE_SHIFT \
+ CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT
+
+#define CDUT_TYPE1_BLOCK_WASTE_MASK \
+ (CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE >> \
+ CDUT_TYPE1_BLOCK_WASTE_SHIFT)
+
+#define CDUT_TYPE1_NCIB_SHIFT \
+ CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT
+
+#define CDUT_TYPE1_NCIB_MASK \
+ (CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK >> \
+ CDUT_TYPE1_NCIB_SHIFT)
+
static void qed_cdu_init_common(struct qed_hwfn *p_hwfn)
{
u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;
@@ -562,6 +1244,92 @@ static void qed_cdu_init_common(struct qed_hwfn *p_hwfn)
SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page);
STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);
+
+ /* CDUT - type-0 tasks configuration */
+ page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT].p_size.val;
+ cxt_size = p_hwfn->p_cxt_mngr->task_type_size[0];
+ elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+ block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
+
+ /* cxt size and block-waste are multiples of 8 */
+ cdu_params = 0;
+ SET_FIELD(cdu_params, CDUT_TYPE0_CXT_SIZE, (cxt_size >> 3));
+ SET_FIELD(cdu_params, CDUT_TYPE0_BLOCK_WASTE, (block_waste >> 3));
+ SET_FIELD(cdu_params, CDUT_TYPE0_NCIB, elems_per_page);
+ STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT0_PARAMS_RT_OFFSET, cdu_params);
+
+ /* CDUT - type-1 tasks configuration */
+ cxt_size = p_hwfn->p_cxt_mngr->task_type_size[1];
+ elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
+ block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
+
+ /* cxt size and block-waste are multiples of 8 */
+ cdu_params = 0;
+ SET_FIELD(cdu_params, CDUT_TYPE1_CXT_SIZE, (cxt_size >> 3));
+ SET_FIELD(cdu_params, CDUT_TYPE1_BLOCK_WASTE, (block_waste >> 3));
+ SET_FIELD(cdu_params, CDUT_TYPE1_NCIB, elems_per_page);
+ STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT1_PARAMS_RT_OFFSET, cdu_params);
+}
+
+/* CDU PF */
+#define CDU_SEG_REG_TYPE_SHIFT CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT
+#define CDU_SEG_REG_TYPE_MASK 0x1
+#define CDU_SEG_REG_OFFSET_SHIFT 0
+#define CDU_SEG_REG_OFFSET_MASK CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK
+
+static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_tid_seg *p_seg;
+ u32 cdu_seg_params, offset;
+ int i;
+
+ static const u32 rt_type_offset_arr[] = {
+ CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET
+ };
+
+ static const u32 rt_type_offset_fl_arr[] = {
+ CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET,
+ CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET
+ };
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+
+ /* There are initializations only for CDUT during the PF phase */
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ /* Segment 0 */
+ p_seg = qed_cxt_tid_seg_info(p_hwfn, i);
+ if (!p_seg)
+ continue;
+
+ /* Note: start_line is already adjusted for the CDU
+ * segment register granularity, so we just need to
+ * divide. Adjustment is implicit as we assume the ILT
+ * page size is larger than 32K!
+ */
+ offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
+ (p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line -
+ p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
+
+ cdu_seg_params = 0;
+ SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
+ SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
+ STORE_RT_REG(p_hwfn, rt_type_offset_arr[i], cdu_seg_params);
+
+ offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
+ (p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)].start_line -
+ p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
+
+ cdu_seg_params = 0;
+ SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
+ SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
+ STORE_RT_REG(p_hwfn, rt_type_offset_fl_arr[i], cdu_seg_params);
+ }
}
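Worked example for the offset arithmetic above: CDUT_SEG_ALIGNMET_IN_BYTES = 1 << (3 + 12) = 32K, so with a 64K ILT page a segment whose working block starts 5 lines past p_cli->first.val is programmed as offset = (65536 * 5) / 32768 = 10 alignment units.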
void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
@@ -579,8 +1347,10 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn)
params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
params.is_first_pf = p_hwfn->first_on_engine;
params.num_pf_cids = iids.cids;
+ params.num_vf_cids = iids.vf_cids;
params.start_pq = qm_info->start_pq;
- params.num_pf_pqs = qm_info->num_pqs;
+ params.num_pf_pqs = qm_info->num_pqs - qm_info->num_vf_pqs;
+ params.num_vf_pqs = qm_info->num_vf_pqs;
params.start_vport = qm_info->start_vport;
params.num_vports = qm_info->num_vports;
params.pf_wfq = qm_info->pf_wfq;
@@ -610,26 +1380,55 @@ static int qed_cm_init_pf(struct qed_hwfn *p_hwfn)
static void qed_dq_init_pf(struct qed_hwfn *p_hwfn)
{
struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
- u32 dq_pf_max_cid = 0;
+ u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0;
dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);
+ dq_vf_max_cid += (p_mngr->conn_cfg[0].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_0_RT_OFFSET, dq_vf_max_cid);
+
dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);
+ dq_vf_max_cid += (p_mngr->conn_cfg[1].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_1_RT_OFFSET, dq_vf_max_cid);
+
dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);
+ dq_vf_max_cid += (p_mngr->conn_cfg[2].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_2_RT_OFFSET, dq_vf_max_cid);
+
dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);
+ dq_vf_max_cid += (p_mngr->conn_cfg[3].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_3_RT_OFFSET, dq_vf_max_cid);
+
dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);
- /* 5 - PF */
+ dq_vf_max_cid += (p_mngr->conn_cfg[4].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_4_RT_OFFSET, dq_vf_max_cid);
+
dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);
+
+ dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid);
+
+ /* Connection types 6 & 7 are not in use, yet they must be configured
+ * as the highest possible connection. Not configuring them means the
+ * defaults will be used, and with a large number of cids a bug may
+ * occur if the defaults are smaller than dq_pf_max_cid /
+ * dq_vf_max_cid.
+ */
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid);
+
+ STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid);
+ STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid);
}
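Worked example of the running maxima above: with DQ_RANGE_SHIFT = 4 and, say, cid_count = 4096 for connection type 0 and 1024 for type 1, DORQ_REG_PF_MAX_ICID_0 is programmed with 4096 >> 4 = 256 and DORQ_REG_PF_MAX_ICID_1 with 256 + (1024 >> 4) = 320. The values keep accumulating across types, which is exactly why the unused types 6 and 7 must also be pinned to the final maximum.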
static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
@@ -639,20 +1438,76 @@ static void qed_ilt_bounds_init(struct qed_hwfn *p_hwfn)
ilt_clients = p_hwfn->p_cxt_mngr->clients;
for_each_ilt_valid_client(i, ilt_clients) {
- if (!ilt_clients[i].active)
- continue;
STORE_RT_REG(p_hwfn,
ilt_clients[i].first.reg,
ilt_clients[i].first.val);
STORE_RT_REG(p_hwfn,
- ilt_clients[i].last.reg,
- ilt_clients[i].last.val);
+ ilt_clients[i].last.reg, ilt_clients[i].last.val);
STORE_RT_REG(p_hwfn,
ilt_clients[i].p_size.reg,
ilt_clients[i].p_size.val);
}
}
+static void qed_ilt_vf_bounds_init(struct qed_hwfn *p_hwfn)
+{
+ struct qed_ilt_client_cfg *p_cli;
+ u32 blk_factor;
+
+ /* For simplicity we set the 'block' to be an ILT page */
+ if (p_hwfn->cdev->p_iov_info) {
+ struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
+
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_VF_BASE_RT_OFFSET,
+ p_iov->first_vf_in_pf);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET,
+ p_iov->first_vf_in_pf + p_iov->total_vfs);
+ }
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+ blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
+ if (p_cli->active) {
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET,
+ blk_factor);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
+ p_cli->pf_total_lines);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET,
+ p_cli->vf_total_lines);
+ }
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
+ if (p_cli->active) {
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET,
+ blk_factor);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
+ p_cli->pf_total_lines);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET,
+ p_cli->vf_total_lines);
+ }
+
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TM];
+ blk_factor = ilog2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
+ if (p_cli->active) {
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET, blk_factor);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
+ p_cli->pf_total_lines);
+ STORE_RT_REG(p_hwfn,
+ PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET,
+ p_cli->vf_total_lines);
+ }
+}
+
/* ILT (PSWRQ2) PF */
static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
{
@@ -662,15 +1517,13 @@ static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
u32 line, rt_offst, i;
qed_ilt_bounds_init(p_hwfn);
+ qed_ilt_vf_bounds_init(p_hwfn);
p_mngr = p_hwfn->p_cxt_mngr;
p_shdw = p_mngr->ilt_shadow;
clients = p_hwfn->p_cxt_mngr->clients;
for_each_ilt_valid_client(i, clients) {
- if (!clients[i].active)
- continue;
-
/** Client's 1st val and RT array are absolute, ILT shadows'
* lines are relative.
*/
@@ -701,6 +1554,137 @@ static void qed_ilt_init_pf(struct qed_hwfn *p_hwfn)
}
}
+/* SRC (Searcher) PF */
+static void qed_src_init_pf(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 rounded_conn_num, conn_num, conn_max;
+ struct qed_src_iids src_iids;
+
+ memset(&src_iids, 0, sizeof(src_iids));
+ qed_cxt_src_iids(p_mngr, &src_iids);
+ conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
+ if (!conn_num)
+ return;
+
+ conn_max = max_t(u32, conn_num, SRC_MIN_NUM_ELEMS);
+ rounded_conn_num = roundup_pow_of_two(conn_max);
+
+ STORE_RT_REG(p_hwfn, SRC_REG_COUNTFREE_RT_OFFSET, conn_num);
+ STORE_RT_REG(p_hwfn, SRC_REG_NUMBER_HASH_BITS_RT_OFFSET,
+ ilog2(rounded_conn_num));
+
+ STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET,
+ p_hwfn->p_cxt_mngr->first_free);
+ STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
+ p_hwfn->p_cxt_mngr->last_free);
+}
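Worked example for the hash sizing above: with conn_num = 5000 searcher connections, conn_max = max(5000, SRC_MIN_NUM_ELEMS) = 5000, rounded_conn_num = roundup_pow_of_two(5000) = 8192, and SRC_REG_NUMBER_HASH_BITS is programmed with ilog2(8192) = 13.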
+
+/* Timers PF */
+#define TM_CFG_NUM_IDS_SHIFT 0
+#define TM_CFG_NUM_IDS_MASK 0xFFFFULL
+#define TM_CFG_PRE_SCAN_OFFSET_SHIFT 16
+#define TM_CFG_PRE_SCAN_OFFSET_MASK 0x1FFULL
+#define TM_CFG_PARENT_PF_SHIFT 25
+#define TM_CFG_PARENT_PF_MASK 0x7ULL
+
+#define TM_CFG_CID_PRE_SCAN_ROWS_SHIFT 30
+#define TM_CFG_CID_PRE_SCAN_ROWS_MASK 0x1FFULL
+
+#define TM_CFG_TID_OFFSET_SHIFT 30
+#define TM_CFG_TID_OFFSET_MASK 0x7FFFFULL
+#define TM_CFG_TID_PRE_SCAN_ROWS_SHIFT 49
+#define TM_CFG_TID_PRE_SCAN_ROWS_MASK 0x1FFULL
+
+static void qed_tm_init_pf(struct qed_hwfn *p_hwfn)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 active_seg_mask = 0, tm_offset, rt_reg;
+ struct qed_tm_iids tm_iids;
+ u64 cfg_word;
+ u8 i;
+
+ memset(&tm_iids, 0, sizeof(tm_iids));
+ qed_cxt_tm_iids(p_mngr, &tm_iids);
+
+ /* @@@TBD No pre-scan for now */
+
+ /* Note: We assume consecutive VFs for a PF */
+ for (i = 0; i < p_mngr->vf_count; i++) {
+ cfg_word = 0;
+ SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids);
+ SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+ SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
+ SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0);
+ rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
+ (sizeof(cfg_word) / sizeof(u32)) *
+ (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i);
+ STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+ }
+
+ cfg_word = 0;
+ SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_cids);
+ SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+ SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0); /* n/a for PF */
+ SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); /* scan all */
+
+ rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
+ (sizeof(cfg_word) / sizeof(u32)) *
+ (NUM_OF_VFS(p_hwfn->cdev) + p_hwfn->rel_pf_id);
+ STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+
+ /* enable scan */
+ STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET,
+ tm_iids.pf_cids ? 0x1 : 0x0);
+
+ /* @@@TBD how to enable the scan for the VFs */
+
+ tm_offset = tm_iids.per_vf_cids;
+
+ /* Note: We assume consecutive VFs for a PF */
+ for (i = 0; i < p_mngr->vf_count; i++) {
+ cfg_word = 0;
+ SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_tids);
+ SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+ SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
+ SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
+ SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0);
+
+ rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
+ (sizeof(cfg_word) / sizeof(u32)) *
+ (p_hwfn->cdev->p_iov_info->first_vf_in_pf + i);
+
+ STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+ }
+
+ tm_offset = tm_iids.pf_cids;
+ for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
+ cfg_word = 0;
+ SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_tids[i]);
+ SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
+ SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);
+ SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
+ SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64) 0);
+
+ rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
+ (sizeof(cfg_word) / sizeof(u32)) *
+ (NUM_OF_VFS(p_hwfn->cdev) +
+ p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i);
+
+ STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
+ active_seg_mask |= (tm_iids.pf_tids[i] ? (1 << i) : 0);
+
+ tm_offset += tm_iids.pf_tids[i];
+ }
+
+ if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE)
+ active_seg_mask = 0;
+
+ STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask);
+
+ /* @@@TBD how to enable the scan for the VFs */
+}
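Each 64-bit cfg_word above is assembled with SET_FIELD from the TM_CFG_* shift/mask pairs defined at the top of this hunk. A minimal model of that packing (the helper below is a sketch, not the kernel's SET_FIELD macro):

#include <stdint.h>

#define SET_BITS(word, val, shift, mask) \
	((word) = ((word) & ~((uint64_t)(mask) << (shift))) | \
		  (((uint64_t)(val) & (mask)) << (shift)))

static uint64_t tm_conn_cfg_word(uint16_t num_ids, uint16_t pre_scan,
				 uint8_t parent_pf)
{
	uint64_t w = 0;

	SET_BITS(w, num_ids, 0, 0xFFFFULL);	/* TM_CFG_NUM_IDS */
	SET_BITS(w, pre_scan, 16, 0x1FFULL);	/* TM_CFG_PRE_SCAN_OFFSET */
	SET_BITS(w, parent_pf, 25, 0x7ULL);	/* TM_CFG_PARENT_PF */
	return w;
}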
+
void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
{
qed_cdu_init_common(p_hwfn);
@@ -711,7 +1695,10 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn)
qed_qm_init_pf(p_hwfn);
qed_cm_init_pf(p_hwfn);
qed_dq_init_pf(p_hwfn);
+ qed_cdu_init_pf(p_hwfn);
qed_ilt_init_pf(p_hwfn);
+ qed_src_init_pf(p_hwfn);
+ qed_tm_init_pf(p_hwfn);
}
int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
@@ -832,17 +1819,439 @@ int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
return 0;
}
-int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
+void qed_rdma_set_pf_params(struct qed_hwfn *p_hwfn,
+ struct qed_rdma_pf_params *p_params)
{
- struct qed_eth_pf_params *p_params = &p_hwfn->pf_params.eth_pf_params;
+ u32 num_cons, num_tasks, num_qps, num_mrs, num_srqs;
+ enum protocol_type proto;
+
+ num_mrs = min_t(u32, RDMA_MAX_TIDS, p_params->num_mrs);
+ num_tasks = num_mrs; /* each mr uses a single task id */
+ num_srqs = min_t(u32, 32 * 1024, p_params->num_srqs);
+
+ switch (p_hwfn->hw_info.personality) {
+ case QED_PCI_ETH_ROCE:
+ num_qps = min_t(u32, ROCE_MAX_QPS, p_params->num_qps);
+ num_cons = num_qps * 2; /* each QP requires two connections */
+ proto = PROTOCOLID_ROCE;
+ break;
+ default:
+ return;
+ }
+
+ if (num_cons && num_tasks) {
+ qed_cxt_set_proto_cid_count(p_hwfn, proto, num_cons, 0);
+ /* Deliberately passing ROCE for the task id. This is because
+ * iWARP / RoCE share the task id.
+ */
+ qed_cxt_set_proto_tid_count(p_hwfn, PROTOCOLID_ROCE,
+ QED_CXT_ROCE_TID_SEG, 1,
+ num_tasks, false);
+ qed_cxt_set_srq_count(p_hwfn, num_srqs);
+ } else {
+ DP_INFO(p_hwfn->cdev,
+ "RDMA personality used without setting params!\n");
+ }
+}
+
+int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn)
+{
/* Set the number of required CORE connections */
u32 core_cids = 1; /* SPQ */
- qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids);
+ qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
+
+ switch (p_hwfn->hw_info.personality) {
+ case QED_PCI_ETH_ROCE:
+ {
+ qed_rdma_set_pf_params(p_hwfn,
+ &p_hwfn->
+ pf_params.rdma_pf_params);
+ /* no need for break since RoCE coexist with Ethernet */
+ }
+ case QED_PCI_ETH:
+ {
+ struct qed_eth_pf_params *p_params =
+ &p_hwfn->pf_params.eth_pf_params;
+
+ qed_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
+ p_params->num_cons, 1);
+ break;
+ }
+ case QED_PCI_ISCSI:
+ {
+ struct qed_iscsi_pf_params *p_params;
+
+ p_params = &p_hwfn->pf_params.iscsi_pf_params;
+
+ if (p_params->num_cons && p_params->num_tasks) {
+ qed_cxt_set_proto_cid_count(p_hwfn,
+ PROTOCOLID_ISCSI,
+ p_params->num_cons,
+ 0);
+
+ qed_cxt_set_proto_tid_count(p_hwfn,
+ PROTOCOLID_ISCSI,
+ QED_CXT_ISCSI_TID_SEG,
+ 0,
+ p_params->num_tasks,
+ true);
+ } else {
+ DP_INFO(p_hwfn->cdev,
+ "Iscsi personality used without setting params!\n");
+ }
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
+ struct qed_tid_mem *p_info)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ u32 proto, seg, total_lines, i, shadow_line;
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_ilt_cli_blk *p_fl_seg;
+ struct qed_tid_seg *p_seg_info;
+
+ /* Verify the personality */
+ switch (p_hwfn->hw_info.personality) {
+ case QED_PCI_ISCSI:
+ proto = PROTOCOLID_ISCSI;
+ seg = QED_CXT_ISCSI_TID_SEG;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+ if (!p_cli->active)
+ return -EINVAL;
+
+ p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
+ if (!p_seg_info->has_fl_mem)
+ return -EINVAL;
+
+ p_fl_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
+ total_lines = DIV_ROUND_UP(p_fl_seg->total_size,
+ p_fl_seg->real_size_in_page);
+
+ for (i = 0; i < total_lines; i++) {
+ shadow_line = i + p_fl_seg->start_line -
+ p_hwfn->p_cxt_mngr->pf_start_line;
+ p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].p_virt;
+ }
+ p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) -
+ p_fl_seg->real_size_in_page;
+ p_info->tid_size = p_mngr->task_type_size[p_seg_info->type];
+ p_info->num_tids_per_block = p_fl_seg->real_size_in_page /
+ p_info->tid_size;
+
+ return 0;
+}
+
+/* This function is very RoCE oriented; if another protocol wants this
+ * feature in the future, we'll need to make the function more generic.
+ */
+int
+qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
+ enum qed_cxt_elem_type elem_type, u32 iid)
+{
+ u32 reg_offset, shadow_line, elem_size, hw_p_size, elems_per_p, line;
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_ilt_cli_blk *p_blk;
+ struct qed_ptt *p_ptt;
+ dma_addr_t p_phys;
+ u64 ilt_hw_entry;
+ void *p_virt;
+ int rc = 0;
+
+ switch (elem_type) {
+ case QED_ELEM_CXT:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+ elem_size = CONN_CXT_SIZE(p_hwfn);
+ p_blk = &p_cli->pf_blks[CDUC_BLK];
+ break;
+ case QED_ELEM_SRQ:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
+ elem_size = SRQ_CXT_SIZE;
+ p_blk = &p_cli->pf_blks[SRQ_BLK];
+ break;
+ case QED_ELEM_TASK:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
+ p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "-EINVALID elem type = %d", elem_type);
+ return -EINVAL;
+ }
+
+ /* Calculate line in ilt */
+ hw_p_size = p_cli->p_size.val;
+ elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
+ line = p_blk->start_line + (iid / elems_per_p);
+ shadow_line = line - p_hwfn->p_cxt_mngr->pf_start_line;
+
+ /* If line is already allocated, do nothing, otherwise allocate it and
+ * write it to the PSWRQ2 registers.
+ * This section can be run in parallel from different contexts and thus
+ * mutex protection is needed.
+ */
+
+ mutex_lock(&p_hwfn->p_cxt_mngr->mutex);
+
+ if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt)
+ goto out0;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_NOTICE(p_hwfn,
+ "QED_TIME_OUT on ptt acquire - dynamic allocation");
+ rc = -EBUSY;
+ goto out0;
+ }
+
+ p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_blk->real_size_in_page,
+ &p_phys, GFP_KERNEL);
+ if (!p_virt) {
+ rc = -ENOMEM;
+ goto out1;
+ }
+ memset(p_virt, 0, p_blk->real_size_in_page);
+
+ /* configuration of refTagMask to 0xF is required for RoCE DIF MR only,
+ * to compensate for a HW bug, but it is configured even if DIF is not
+ * enabled. This is harmless and allows us to avoid a dedicated API. We
+ * configure the field for all of the contexts on the newly allocated
+ * page.
+ */
+ if (elem_type == QED_ELEM_TASK) {
+ u32 elem_i;
+ u8 *elem_start = (u8 *)p_virt;
+ union type1_task_context *elem;
+
+ for (elem_i = 0; elem_i < elems_per_p; elem_i++) {
+ elem = (union type1_task_context *)elem_start;
+ SET_FIELD(elem->roce_ctx.tdif_context.flags1,
+ TDIF_TASK_CONTEXT_REFTAGMASK, 0xf);
+ elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn);
+ }
+ }
+
+ p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt = p_virt;
+ p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys = p_phys;
+ p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size =
+ p_blk->real_size_in_page;
+
+ /* compute absolute offset */
+ reg_offset = PSWRQ2_REG_ILT_MEMORY +
+ (line * ILT_REG_SIZE_IN_BYTES * ILT_ENTRY_IN_REGS);
+
+ ilt_hw_entry = 0;
+ SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
+ SET_FIELD(ilt_hw_entry,
+ ILT_ENTRY_PHY_ADDR,
+ (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys >> 12));
+
+ /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus
+ * register.
+ */
+ qed_dmae_host2grc(p_hwfn, p_ptt, (u64) (uintptr_t)&ilt_hw_entry,
+ reg_offset, sizeof(ilt_hw_entry) / sizeof(u32), 0);
+
+ if (elem_type == QED_ELEM_CXT) {
+ u32 last_cid_allocated = (1 + (iid / elems_per_p)) *
+ elems_per_p;
+
+ /* Update the relevant register in the parser */
+ qed_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF,
+ last_cid_allocated - 1);
+
+ if (!p_hwfn->b_rdma_enabled_in_prs) {
+ /* Enable RoCE search */
+ qed_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
+ p_hwfn->b_rdma_enabled_in_prs = true;
+ }
+ }
+
+out1:
+ qed_ptt_release(p_hwfn, p_ptt);
+out0:
+ mutex_unlock(&p_hwfn->p_cxt_mngr->mutex);
+
+ return rc;
+}
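The line/shadow-line computation above is plain integer math. A standalone sketch with invented inputs (the shift-by-12 page-size encoding is an assumption about ILT_PAGE_IN_BYTES, not something stated in this patch):

#include <stdio.h>

/* Assumed encoding: p_size.val selects a power-of-two page >= 4 KB. */
#define ILT_PAGE_IN_BYTES(val)	(1U << ((val) + 12))

int main(void)
{
	unsigned int hw_p_size = 0;	/* 4 KB pages (invented) */
	unsigned int elem_size = 512;	/* invented context size */
	unsigned int start_line = 100;	/* invented block start line */
	unsigned int pf_start_line = 40;	/* invented PF base line */
	unsigned int iid = 50;

	unsigned int elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
	unsigned int line = start_line + iid / elems_per_p;
	unsigned int shadow_line = line - pf_start_line;

	/* 8 elements per page, so iid 50 sits on page 6: line=106, shadow=66 */
	printf("line=%u shadow_line=%u\n", line, shadow_line);
	return 0;
}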
+
+/* This function is very RoCE oriented; if another protocol wants this
+ * feature in the future, we'll need to make the function more generic.
+ */
+static int
+qed_cxt_free_ilt_range(struct qed_hwfn *p_hwfn,
+ enum qed_cxt_elem_type elem_type,
+ u32 start_iid, u32 count)
+{
+ u32 start_line, end_line, shadow_start_line, shadow_end_line;
+ u32 reg_offset, elem_size, hw_p_size, elems_per_p;
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_ilt_cli_blk *p_blk;
+ u32 end_iid = start_iid + count;
+ struct qed_ptt *p_ptt;
+ u64 ilt_hw_entry = 0;
+ u32 i;
+
+ switch (elem_type) {
+ case QED_ELEM_CXT:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
+ elem_size = CONN_CXT_SIZE(p_hwfn);
+ p_blk = &p_cli->pf_blks[CDUC_BLK];
+ break;
+ case QED_ELEM_SRQ:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
+ elem_size = SRQ_CXT_SIZE;
+ p_blk = &p_cli->pf_blks[SRQ_BLK];
+ break;
+ case QED_ELEM_TASK:
+ p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
+ elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
+ p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(QED_CXT_ROCE_TID_SEG)];
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "-EINVALID elem type = %d", elem_type);
+ return -EINVAL;
+ }
+
+ /* Calculate line in ilt */
+ hw_p_size = p_cli->p_size.val;
+ elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
+ start_line = p_blk->start_line + (start_iid / elems_per_p);
+ end_line = p_blk->start_line + (end_iid / elems_per_p);
+ if (((end_iid + 1) / elems_per_p) != (end_iid / elems_per_p))
+ end_line--;
+
+ shadow_start_line = start_line - p_hwfn->p_cxt_mngr->pf_start_line;
+ shadow_end_line = end_line - p_hwfn->p_cxt_mngr->pf_start_line;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_NOTICE(p_hwfn,
+ "QED_TIME_OUT on ptt acquire - dynamic allocation");
+ return -EBUSY;
+ }
+
+ for (i = shadow_start_line; i < shadow_end_line; i++) {
+ if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt)
+ continue;
+
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].size,
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt,
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys);
+
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt = NULL;
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys = 0;
+ p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0;
+
+ /* compute absolute offset */
+ reg_offset = PSWRQ2_REG_ILT_MEMORY +
+ ((start_line++) * ILT_REG_SIZE_IN_BYTES *
+ ILT_ENTRY_IN_REGS);
+
+ /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a
+ * wide-bus register.
+ */
+ qed_dmae_host2grc(p_hwfn, p_ptt,
+ (u64) (uintptr_t) &ilt_hw_entry,
+ reg_offset,
+ sizeof(ilt_hw_entry) / sizeof(u32),
+ 0);
+ }
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ return 0;
+}
+
+int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto)
+{
+ int rc;
+ u32 cid;
+
+ /* Free Connection CXT */
+ rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_CXT,
+ qed_cxt_get_proto_cid_start(p_hwfn,
+ proto),
+ qed_cxt_get_proto_cid_count(p_hwfn,
+ proto, &cid));
+
+ if (rc)
+ return rc;
+
+ /* Free Task CXT */
+ rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_TASK, 0,
+ qed_cxt_get_proto_tid_count(p_hwfn, proto));
+ if (rc)
+ return rc;
+
+ /* Free TSDM CXT */
+ rc = qed_cxt_free_ilt_range(p_hwfn, QED_ELEM_SRQ, 0,
+ qed_cxt_get_srq_count(p_hwfn));
+
+ return rc;
+}
+
+int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
+ u32 tid, u8 ctx_type, void **pp_task_ctx)
+{
+ struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+ struct qed_ilt_client_cfg *p_cli;
+ struct qed_ilt_cli_blk *p_seg;
+ struct qed_tid_seg *p_seg_info;
+ u32 proto, seg;
+ u32 total_lines;
+ u32 tid_size, ilt_idx;
+ u32 num_tids_per_block;
+
+ /* Verify the personality */
+ switch (p_hwfn->hw_info.personality) {
+ case QED_PCI_ISCSI:
+ proto = PROTOCOLID_ISCSI;
+ seg = QED_CXT_ISCSI_TID_SEG;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ p_cli = &p_mngr->clients[ILT_CLI_CDUT];
+ if (!p_cli->active)
+ return -EINVAL;
+
+ p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg];
+
+ if (ctx_type == QED_CTX_WORKING_MEM) {
+ p_seg = &p_cli->pf_blks[CDUT_SEG_BLK(seg)];
+ } else if (ctx_type == QED_CTX_FL_MEM) {
+ if (!p_seg_info->has_fl_mem)
+ return -EINVAL;
+ p_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)];
+ } else {
+ return -EINVAL;
+ }
+ total_lines = DIV_ROUND_UP(p_seg->total_size, p_seg->real_size_in_page);
+ tid_size = p_mngr->task_type_size[p_seg_info->type];
+ num_tids_per_block = p_seg->real_size_in_page / tid_size;
+
+ if (total_lines < tid / num_tids_per_block)
+ return -EINVAL;
+ ilt_idx = tid / num_tids_per_block + p_seg->start_line -
+ p_mngr->pf_start_line;
+ *pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].p_virt +
+ (tid % num_tids_per_block) * tid_size;
+ return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index c8e1f5e5c42b..c6f6f2e8192d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -21,6 +21,14 @@ struct qed_cxt_info {
enum protocol_type type;
};
+#define MAX_TID_BLOCKS 512
+struct qed_tid_mem {
+ u32 tid_size;
+ u32 num_tids_per_block;
+ u32 waste;
+ u8 *blocks[MAX_TID_BLOCKS]; /* 4K */
+};
+
/**
* @brief qed_cxt_acquire - Acquire a new cid of a specific protocol type
*
@@ -46,11 +54,28 @@ int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
struct qed_cxt_info *p_info);
+/**
+ * @brief qed_cxt_get_tid_mem_info
+ *
+ * @param p_hwfn
+ * @param p_info
+ *
+ * @return int
+ */
+int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
+ struct qed_tid_mem *p_info);
+
+#define QED_CXT_ISCSI_TID_SEG PROTOCOLID_ISCSI
+#define QED_CXT_ROCE_TID_SEG PROTOCOLID_ROCE
enum qed_cxt_elem_type {
QED_ELEM_CXT,
+ QED_ELEM_SRQ,
QED_ELEM_TASK
};
+u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
+ enum protocol_type type, u32 *vf_cid);
+
/**
* @brief qed_cxt_set_pf_params - Set the PF params for cxt init
*
@@ -128,6 +153,16 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn);
void qed_qm_init_pf(struct qed_hwfn *p_hwfn);
/**
+ * @brief Reconfigures QM pf on the fly
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return int
+ */
+int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
* @brief qed_cxt_release - Release a cid
*
* @param p_hwfn
@@ -136,4 +171,6 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn);
void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
u32 cid);
+#define QED_CTX_WORKING_MEM 0
+#define QED_CTX_FL_MEM 1
#endif
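A hypothetical caller of the new qed_cxt_get_task_ctx() API, to show how QED_CTX_WORKING_MEM is meant to be used; the helper name and the zeroing are invented, only the call itself comes from the patch:

/* Sketch only: assumes the driver's headers and a valid tid_size. */
static int example_clear_task_ctx(struct qed_hwfn *p_hwfn,
				  u32 tid, u32 tid_size)
{
	void *p_ctx;
	int rc;

	rc = qed_cxt_get_task_ctx(p_hwfn, tid, QED_CTX_WORKING_MEM, &p_ctx);
	if (rc)
		return rc;	/* wrong personality, inactive client, ... */

	memset(p_ctx, 0, tid_size);
	return 0;
}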
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
new file mode 100644
index 000000000000..3656d2fd673d
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -0,0 +1,2313 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/bitops.h>
+#include <linux/dcbnl.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include "qed.h"
+#include "qed_cxt.h"
+#include "qed_dcbx.h"
+#include "qed_hsi.h"
+#include "qed_sp.h"
+#include "qed_sriov.h"
+#ifdef CONFIG_DCB
+#include <linux/qed/qed_eth_if.h>
+#endif
+
+#define QED_DCBX_MAX_MIB_READ_TRY (100)
+#define QED_ETH_TYPE_DEFAULT (0)
+#define QED_ETH_TYPE_ROCE (0x8915)
+#define QED_UDP_PORT_TYPE_ROCE_V2 (0x12B7)
+#define QED_ETH_TYPE_FCOE (0x8906)
+#define QED_TCP_PORT_ISCSI (0xCBC)
+
+#define QED_DCBX_INVALID_PRIORITY 0xFF
+
+/* Get the traffic class from the priority traffic-class table; 4 bits
+ * represent the traffic class corresponding to the priority.
+ */
+#define QED_DCBX_PRIO2TC(prio_tc_tbl, prio) \
+ ((u32)(prio_tc_tbl >> ((7 - prio) * 4)) & 0x7)
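Concretely, the table packs one nibble per priority with priority 0 in the most significant position. A toy round-trip, with an invented table value, shows the extraction:

#include <stdio.h>

#define QED_DCBX_PRIO2TC(prio_tc_tbl, prio) \
	((unsigned int)((prio_tc_tbl) >> ((7 - (prio)) * 4)) & 0x7)

int main(void)
{
	unsigned int tbl = 0x01234567;	/* maps priority i to TC i */
	int prio;

	for (prio = 0; prio < 8; prio++)
		printf("prio %d -> tc %u\n",
		       prio, QED_DCBX_PRIO2TC(tbl, prio));
	return 0;
}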
+
+static const struct qed_dcbx_app_metadata qed_dcbx_app_update[] = {
+ {DCBX_PROTOCOL_ISCSI, "ISCSI", QED_PCI_DEFAULT},
+ {DCBX_PROTOCOL_FCOE, "FCOE", QED_PCI_DEFAULT},
+ {DCBX_PROTOCOL_ROCE, "ROCE", QED_PCI_DEFAULT},
+ {DCBX_PROTOCOL_ROCE_V2, "ROCE_V2", QED_PCI_DEFAULT},
+ {DCBX_PROTOCOL_ETH, "ETH", QED_PCI_ETH}
+};
+
+static bool qed_dcbx_app_ethtype(u32 app_info_bitmap)
+{
+ return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
+ DCBX_APP_SF_ETHTYPE);
+}
+
+static bool qed_dcbx_ieee_app_ethtype(u32 app_info_bitmap)
+{
+ u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
+
+ /* Old MFW */
+ if (mfw_val == DCBX_APP_SF_IEEE_RESERVED)
+ return qed_dcbx_app_ethtype(app_info_bitmap);
+
+ return !!(mfw_val == DCBX_APP_SF_IEEE_ETHTYPE);
+}
+
+static bool qed_dcbx_app_port(u32 app_info_bitmap)
+{
+ return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
+ DCBX_APP_SF_PORT);
+}
+
+static bool qed_dcbx_ieee_app_port(u32 app_info_bitmap, u8 type)
+{
+ u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
+
+ /* Old MFW */
+ if (mfw_val == DCBX_APP_SF_IEEE_RESERVED)
+ return qed_dcbx_app_port(app_info_bitmap);
+
+ return !!(mfw_val == type || mfw_val == DCBX_APP_SF_IEEE_TCP_UDP_PORT);
+}
+
+static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
+{
+ bool ethtype;
+
+ if (ieee)
+ ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap);
+ else
+ ethtype = qed_dcbx_app_ethtype(app_info_bitmap);
+
+ return !!(ethtype && (proto_id == QED_ETH_TYPE_DEFAULT));
+}
+
+static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
+{
+ bool port;
+
+ if (ieee)
+ port = qed_dcbx_ieee_app_port(app_info_bitmap,
+ DCBX_APP_SF_IEEE_TCP_PORT);
+ else
+ port = qed_dcbx_app_port(app_info_bitmap);
+
+ return !!(port && (proto_id == QED_TCP_PORT_ISCSI));
+}
+
+static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
+{
+ bool ethtype;
+
+ if (ieee)
+ ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap);
+ else
+ ethtype = qed_dcbx_app_ethtype(app_info_bitmap);
+
+ return !!(ethtype && (proto_id == QED_ETH_TYPE_FCOE));
+}
+
+static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
+{
+ bool ethtype;
+
+ if (ieee)
+ ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap);
+ else
+ ethtype = qed_dcbx_app_ethtype(app_info_bitmap);
+
+ return !!(ethtype && (proto_id == QED_ETH_TYPE_ROCE));
+}
+
+static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
+{
+ bool port;
+
+ if (ieee)
+ port = qed_dcbx_ieee_app_port(app_info_bitmap,
+ DCBX_APP_SF_IEEE_UDP_PORT);
+ else
+ port = qed_dcbx_app_port(app_info_bitmap);
+
+ return !!(port && (proto_id == QED_UDP_PORT_TYPE_ROCE_V2));
+}
+
+static void
+qed_dcbx_dp_protocol(struct qed_hwfn *p_hwfn, struct qed_dcbx_results *p_data)
+{
+ enum dcbx_protocol_type id;
+ int i;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB, "DCBX negotiated: %d\n",
+ p_data->dcbx_enabled);
+
+ for (i = 0; i < ARRAY_SIZE(qed_dcbx_app_update); i++) {
+ id = qed_dcbx_app_update[i].id;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "%s info: update %d, enable %d, prio %d, tc %d, num_tc %d\n",
+ qed_dcbx_app_update[i].name, p_data->arr[id].update,
+ p_data->arr[id].enable, p_data->arr[id].priority,
+ p_data->arr[id].tc, p_hwfn->hw_info.num_tc);
+ }
+}
+
+static void
+qed_dcbx_set_params(struct qed_dcbx_results *p_data,
+ struct qed_hw_info *p_info,
+ bool enable,
+ bool update,
+ u8 prio,
+ u8 tc,
+ enum dcbx_protocol_type type,
+ enum qed_pci_personality personality)
+{
+ /* PF update ramrod data */
+ p_data->arr[type].update = update;
+ p_data->arr[type].enable = enable;
+ p_data->arr[type].priority = prio;
+ p_data->arr[type].tc = tc;
+
+ /* QM reconf data */
+ if (p_info->personality == personality) {
+ if (personality == QED_PCI_ETH)
+ p_info->non_offload_tc = tc;
+ else
+ p_info->offload_tc = tc;
+ }
+}
+
+/* Update app protocol data and hw_info fields with the TLV info */
+static void
+qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
+ struct qed_hwfn *p_hwfn,
+ bool enable,
+ bool update,
+ u8 prio, u8 tc, enum dcbx_protocol_type type)
+{
+ struct qed_hw_info *p_info = &p_hwfn->hw_info;
+ enum qed_pci_personality personality;
+ enum dcbx_protocol_type id;
+ char *name;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(qed_dcbx_app_update); i++) {
+ id = qed_dcbx_app_update[i].id;
+
+ if (type != id)
+ continue;
+
+ personality = qed_dcbx_app_update[i].personality;
+ name = qed_dcbx_app_update[i].name;
+
+ qed_dcbx_set_params(p_data, p_info, enable, update,
+ prio, tc, type, personality);
+ }
+}
+
+static bool
+qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn,
+ u32 app_prio_bitmap,
+ u16 id, enum dcbx_protocol_type *type, bool ieee)
+{
+ if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id, ieee)) {
+ *type = DCBX_PROTOCOL_FCOE;
+ } else if (qed_dcbx_roce_tlv(app_prio_bitmap, id, ieee)) {
+ *type = DCBX_PROTOCOL_ROCE;
+ } else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id, ieee)) {
+ *type = DCBX_PROTOCOL_ISCSI;
+ } else if (qed_dcbx_default_tlv(app_prio_bitmap, id, ieee)) {
+ *type = DCBX_PROTOCOL_ETH;
+ } else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id, ieee)) {
+ *type = DCBX_PROTOCOL_ROCE_V2;
+ } else {
+ *type = DCBX_MAX_PROTOCOL_TYPE;
+ DP_ERR(p_hwfn,
+ "No action required, App TLV id = 0x%x app_prio_bitmap = 0x%x\n",
+ id, app_prio_bitmap);
+ return false;
+ }
+
+ return true;
+}
+
+/* Parse app TLVs to update TC information in the hw_info structure for
+ * reconfiguring QM. Get protocol specific data for the PF update ramrod
+ * command.
+ */
+static int
+qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
+ struct qed_dcbx_results *p_data,
+ struct dcbx_app_priority_entry *p_tbl,
+ u32 pri_tc_tbl, int count, u8 dcbx_version)
+{
+ u8 tc, priority_map;
+ enum dcbx_protocol_type type;
+ bool enable, ieee;
+ u16 protocol_id;
+ int priority;
+ int i;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count);
+
+ ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE);
+ /* Parse APP TLV */
+ for (i = 0; i < count; i++) {
+ protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_PROTOCOL_ID);
+ priority_map = QED_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_PRI_MAP);
+ priority = ffs(priority_map) - 1;
+ if (priority < 0) {
+ DP_ERR(p_hwfn, "Invalid priority\n");
+ return -EINVAL;
+ }
+
+ tc = QED_DCBX_PRIO2TC(pri_tc_tbl, priority);
+ if (qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry,
+ protocol_id, &type, ieee)) {
+ /* ETH always has the enable bit reset, as it gets
+ * vlan information per packet. For other protocols it
+ * should be set according to the dcbx_enabled
+ * indication, but we only get here if there was an
+ * app tlv for the protocol, so dcbx must be enabled.
+ */
+ enable = !(type == DCBX_PROTOCOL_ETH);
+
+ qed_dcbx_update_app_info(p_data, p_hwfn, enable, true,
+ priority, tc, type);
+ }
+ }
+
+ /* If the RoCE-V2 TLV is not detected, the driver needs to use the
+ * RoCE app data for RoCE-v2, not the default app data.
+ */
+ if (!p_data->arr[DCBX_PROTOCOL_ROCE_V2].update &&
+ p_data->arr[DCBX_PROTOCOL_ROCE].update) {
+ tc = p_data->arr[DCBX_PROTOCOL_ROCE].tc;
+ priority = p_data->arr[DCBX_PROTOCOL_ROCE].priority;
+ qed_dcbx_update_app_info(p_data, p_hwfn, true, true,
+ priority, tc, DCBX_PROTOCOL_ROCE_V2);
+ }
+
+ /* Update ramrod protocol data and hw_info fields with default info
+ * when the corresponding APP TLVs are not detected. The enabled field
+ * follows different logic for ethernet: only for ethernet should DCB
+ * be disabled by default, since its information arrives from the OS
+ * (unless an explicit app tlv was present).
+ */
+ tc = p_data->arr[DCBX_PROTOCOL_ETH].tc;
+ priority = p_data->arr[DCBX_PROTOCOL_ETH].priority;
+ for (type = 0; type < DCBX_MAX_PROTOCOL_TYPE; type++) {
+ if (p_data->arr[type].update)
+ continue;
+
+ enable = !(type == DCBX_PROTOCOL_ETH);
+ qed_dcbx_update_app_info(p_data, p_hwfn, enable, true,
+ priority, tc, type);
+ }
+
+ return 0;
+}
+
+/* Parse app TLVs to update TC information in the hw_info structure for
+ * reconfiguring QM. Get protocol specific data for the PF update ramrod
+ * command.
+ */
+static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn)
+{
+ struct dcbx_app_priority_feature *p_app;
+ struct dcbx_app_priority_entry *p_tbl;
+ struct qed_dcbx_results data = { 0 };
+ struct dcbx_ets_feature *p_ets;
+ struct qed_hw_info *p_info;
+ u32 pri_tc_tbl, flags;
+ u8 dcbx_version;
+ int num_entries;
+ int rc = 0;
+
+ flags = p_hwfn->p_dcbx_info->operational.flags;
+ dcbx_version = QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION);
+
+ p_app = &p_hwfn->p_dcbx_info->operational.features.app;
+ p_tbl = p_app->app_pri_tbl;
+
+ p_ets = &p_hwfn->p_dcbx_info->operational.features.ets;
+ pri_tc_tbl = p_ets->pri_tc_tbl[0];
+
+ p_info = &p_hwfn->hw_info;
+ num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES);
+
+ rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl,
+ num_entries, dcbx_version);
+ if (rc)
+ return rc;
+
+ p_info->num_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS);
+ data.pf_id = p_hwfn->rel_pf_id;
+ data.dcbx_enabled = !!dcbx_version;
+
+ qed_dcbx_dp_protocol(p_hwfn, &data);
+
+ memcpy(&p_hwfn->p_dcbx_info->results, &data,
+ sizeof(struct qed_dcbx_results));
+
+ return 0;
+}
+
+static int
+qed_dcbx_copy_mib(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_dcbx_mib_meta_data *p_data,
+ enum qed_mib_read_type type)
+{
+ u32 prefix_seq_num, suffix_seq_num;
+ int read_count = 0;
+ int rc = 0;
+
+ /* The data is considered to be valid only if both sequence numbers are
+ * the same.
+ */
+ do {
+ if (type == QED_DCBX_REMOTE_LLDP_MIB) {
+ qed_memcpy_from(p_hwfn, p_ptt, p_data->lldp_remote,
+ p_data->addr, p_data->size);
+ prefix_seq_num = p_data->lldp_remote->prefix_seq_num;
+ suffix_seq_num = p_data->lldp_remote->suffix_seq_num;
+ } else {
+ qed_memcpy_from(p_hwfn, p_ptt, p_data->mib,
+ p_data->addr, p_data->size);
+ prefix_seq_num = p_data->mib->prefix_seq_num;
+ suffix_seq_num = p_data->mib->suffix_seq_num;
+ }
+ read_count++;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DCB,
+ "mib type = %d, try count = %d prefix seq num = %d suffix seq num = %d\n",
+ type, read_count, prefix_seq_num, suffix_seq_num);
+ } while ((prefix_seq_num != suffix_seq_num) &&
+ (read_count < QED_DCBX_MAX_MIB_READ_TRY));
+
+ if (read_count >= QED_DCBX_MAX_MIB_READ_TRY) {
+ DP_ERR(p_hwfn,
+ "MIB read err, mib type = %d, try count = %d prefix seq num = %d suffix seq num = %d\n",
+ type, read_count, prefix_seq_num, suffix_seq_num);
+ rc = -EIO;
+ }
+
+ return rc;
+}
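The retry loop is a seqlock-style consistent read: the management FW brackets the MIB with matching sequence numbers while it is stable, so a snapshot is valid only when both agree. A toy model of the same pattern (the struct layout is invented):

struct toy_mib {
	unsigned int prefix_seq_num;
	int payload;
	unsigned int suffix_seq_num;
};

/* Copy the shared MIB and accept the snapshot only when both sequence
 * numbers agree, with the same retry budget the driver uses (100).
 */
static int toy_copy_mib(const volatile struct toy_mib *shared,
			struct toy_mib *snap)
{
	int tries = 0;

	do {
		*snap = *shared;
		if (snap->prefix_seq_num == snap->suffix_seq_num)
			return 0;
	} while (++tries < 100);

	return -1;	/* the driver returns -EIO at this point */
}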
+
+#ifdef CONFIG_DCB
+static void
+qed_dcbx_get_priority_info(struct qed_hwfn *p_hwfn,
+ struct qed_dcbx_app_prio *p_prio,
+ struct qed_dcbx_results *p_results)
+{
+ u8 val;
+
+ p_prio->roce = QED_DCBX_INVALID_PRIORITY;
+ p_prio->roce_v2 = QED_DCBX_INVALID_PRIORITY;
+ p_prio->iscsi = QED_DCBX_INVALID_PRIORITY;
+ p_prio->fcoe = QED_DCBX_INVALID_PRIORITY;
+
+ if (p_results->arr[DCBX_PROTOCOL_ROCE].update &&
+ p_results->arr[DCBX_PROTOCOL_ROCE].enable)
+ p_prio->roce = p_results->arr[DCBX_PROTOCOL_ROCE].priority;
+
+ if (p_results->arr[DCBX_PROTOCOL_ROCE_V2].update &&
+ p_results->arr[DCBX_PROTOCOL_ROCE_V2].enable) {
+ val = p_results->arr[DCBX_PROTOCOL_ROCE_V2].priority;
+ p_prio->roce_v2 = val;
+ }
+
+ if (p_results->arr[DCBX_PROTOCOL_ISCSI].update &&
+ p_results->arr[DCBX_PROTOCOL_ISCSI].enable)
+ p_prio->iscsi = p_results->arr[DCBX_PROTOCOL_ISCSI].priority;
+
+ if (p_results->arr[DCBX_PROTOCOL_FCOE].update &&
+ p_results->arr[DCBX_PROTOCOL_FCOE].enable)
+ p_prio->fcoe = p_results->arr[DCBX_PROTOCOL_FCOE].priority;
+
+ if (p_results->arr[DCBX_PROTOCOL_ETH].update &&
+ p_results->arr[DCBX_PROTOCOL_ETH].enable)
+ p_prio->eth = p_results->arr[DCBX_PROTOCOL_ETH].priority;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "Priorities: iscsi %d, roce %d, roce v2 %d, fcoe %d, eth %d\n",
+ p_prio->iscsi, p_prio->roce, p_prio->roce_v2, p_prio->fcoe,
+ p_prio->eth);
+}
+
+static void
+qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn,
+ struct dcbx_app_priority_feature *p_app,
+ struct dcbx_app_priority_entry *p_tbl,
+ struct qed_dcbx_params *p_params, bool ieee)
+{
+ struct qed_app_entry *entry;
+ u8 pri_map;
+ int i;
+
+ p_params->app_willing = QED_MFW_GET_FIELD(p_app->flags,
+ DCBX_APP_WILLING);
+ p_params->app_valid = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_ENABLED);
+ p_params->app_error = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_ERROR);
+ p_params->num_app_entries = QED_MFW_GET_FIELD(p_app->flags,
+ DCBX_APP_NUM_ENTRIES);
+ for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
+ entry = &p_params->app_entry[i];
+ if (ieee) {
+ u8 sf_ieee;
+ u32 val;
+
+ sf_ieee = QED_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_SF_IEEE);
+ switch (sf_ieee) {
+ case DCBX_APP_SF_IEEE_RESERVED:
+ /* Old MFW */
+ val = QED_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_SF);
+ entry->sf_ieee = val ?
+ QED_DCBX_SF_IEEE_TCP_UDP_PORT :
+ QED_DCBX_SF_IEEE_ETHTYPE;
+ break;
+ case DCBX_APP_SF_IEEE_ETHTYPE:
+ entry->sf_ieee = QED_DCBX_SF_IEEE_ETHTYPE;
+ break;
+ case DCBX_APP_SF_IEEE_TCP_PORT:
+ entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_PORT;
+ break;
+ case DCBX_APP_SF_IEEE_UDP_PORT:
+ entry->sf_ieee = QED_DCBX_SF_IEEE_UDP_PORT;
+ break;
+ case DCBX_APP_SF_IEEE_TCP_UDP_PORT:
+ entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_UDP_PORT;
+ break;
+ }
+ } else {
+ entry->ethtype = !(QED_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_SF));
+ }
+
+ pri_map = QED_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP);
+ entry->prio = ffs(pri_map) - 1;
+ entry->proto_id = QED_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_PROTOCOL_ID);
+ qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry,
+ entry->proto_id,
+ &entry->proto_type, ieee);
+ }
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "APP params: willing %d, valid %d error = %d\n",
+ p_params->app_willing, p_params->app_valid,
+ p_params->app_error);
+}
+
+static void
+qed_dcbx_get_pfc_data(struct qed_hwfn *p_hwfn,
+ u32 pfc, struct qed_dcbx_params *p_params)
+{
+ u8 pfc_map;
+
+ p_params->pfc.willing = QED_MFW_GET_FIELD(pfc, DCBX_PFC_WILLING);
+ p_params->pfc.max_tc = QED_MFW_GET_FIELD(pfc, DCBX_PFC_CAPS);
+ p_params->pfc.enabled = QED_MFW_GET_FIELD(pfc, DCBX_PFC_ENABLED);
+ pfc_map = QED_MFW_GET_FIELD(pfc, DCBX_PFC_PRI_EN_BITMAP);
+ p_params->pfc.prio[0] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_0);
+ p_params->pfc.prio[1] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_1);
+ p_params->pfc.prio[2] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_2);
+ p_params->pfc.prio[3] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_3);
+ p_params->pfc.prio[4] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_4);
+ p_params->pfc.prio[5] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_5);
+ p_params->pfc.prio[6] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_6);
+ p_params->pfc.prio[7] = !!(pfc_map & DCBX_PFC_PRI_EN_BITMAP_PRI_7);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "PFC params: willing %d, pfc_bitmap %d\n",
+ p_params->pfc.willing, pfc_map);
+}
+
+static void
+qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn,
+ struct dcbx_ets_feature *p_ets,
+ struct qed_dcbx_params *p_params)
+{
+ u32 bw_map[2], tsa_map[2], pri_map;
+ int i;
+
+ p_params->ets_willing = QED_MFW_GET_FIELD(p_ets->flags,
+ DCBX_ETS_WILLING);
+ p_params->ets_enabled = QED_MFW_GET_FIELD(p_ets->flags,
+ DCBX_ETS_ENABLED);
+ p_params->ets_cbs = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_CBS);
+ p_params->max_ets_tc = QED_MFW_GET_FIELD(p_ets->flags,
+ DCBX_ETS_MAX_TCS);
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "ETS params: willing %d, ets_cbs %d pri_tc_tbl_0 %x max_ets_tc %d\n",
+ p_params->ets_willing,
+ p_params->ets_cbs,
+ p_ets->pri_tc_tbl[0], p_params->max_ets_tc);
+
+ /* The 8-bit TSA and BW values corresponding to each of the 8 TCs are
+ * encoded in a u32 array of size 2.
+ */
+ bw_map[0] = be32_to_cpu(p_ets->tc_bw_tbl[0]);
+ bw_map[1] = be32_to_cpu(p_ets->tc_bw_tbl[1]);
+ tsa_map[0] = be32_to_cpu(p_ets->tc_tsa_tbl[0]);
+ tsa_map[1] = be32_to_cpu(p_ets->tc_tsa_tbl[1]);
+ pri_map = p_ets->pri_tc_tbl[0];
+ for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) {
+ p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i];
+ p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i];
+ p_params->ets_pri_tc_tbl[i] = QED_DCBX_PRIO2TC(pri_map, i);
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB,
+ "elem %d bw_tbl %x tsa_tbl %x\n",
+ i, p_params->ets_tc_bw_tbl[i],
+ p_params->ets_tc_tsa_tbl[i]);
+ }
+}
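The idiom worth noting is the u8 view over the two u32 words: eight per-TC bytes addressed linearly once byte order has been handled by the be32_to_cpu() step above. A host-order-only toy with invented byte values:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint8_t tc_bytes[8] = { 10, 20, 30, 40, 50, 60, 70, 80 };
	uint32_t bw_map[2];
	int i;

	/* Two u32 words back eight per-TC bytes, as in the driver. */
	memcpy(bw_map, tc_bytes, sizeof(bw_map));
	for (i = 0; i < 8; i++)
		printf("tc%d bw=%u\n", i, ((uint8_t *)bw_map)[i]);
	return 0;
}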
+
+static void
+qed_dcbx_get_common_params(struct qed_hwfn *p_hwfn,
+ struct dcbx_app_priority_feature *p_app,
+ struct dcbx_app_priority_entry *p_tbl,
+ struct dcbx_ets_feature *p_ets,
+ u32 pfc, struct qed_dcbx_params *p_params, bool ieee)
+{
+ qed_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params, ieee);
+ qed_dcbx_get_ets_data(p_hwfn, p_ets, p_params);
+ qed_dcbx_get_pfc_data(p_hwfn, pfc, p_params);
+}
+
+static void
+qed_dcbx_get_local_params(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, struct qed_dcbx_get *params)
+{
+ struct dcbx_features *p_feat;
+
+ p_feat = &p_hwfn->p_dcbx_info->local_admin.features;
+ qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
+ p_feat->app.app_pri_tbl, &p_feat->ets,
+ p_feat->pfc, &params->local.params, false);
+ params->local.valid = true;
+}
+
+static void
+qed_dcbx_get_remote_params(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, struct qed_dcbx_get *params)
+{
+ struct dcbx_features *p_feat;
+
+ p_feat = &p_hwfn->p_dcbx_info->remote.features;
+ qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
+ p_feat->app.app_pri_tbl, &p_feat->ets,
+ p_feat->pfc, &params->remote.params, false);
+ params->remote.valid = true;
+}
+
+static void
+qed_dcbx_get_operational_params(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_dcbx_get *params)
+{
+ struct qed_dcbx_operational_params *p_operational;
+ struct qed_dcbx_results *p_results;
+ struct dcbx_features *p_feat;
+ bool enabled, err;
+ u32 flags;
+ bool val;
+
+ flags = p_hwfn->p_dcbx_info->operational.flags;
+
+ /* If the DCBX version is non-zero, then negotiation
+ * was successfully performed.
+ */
+ p_operational = &params->operational;
+ enabled = !!(QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) !=
+ DCBX_CONFIG_VERSION_DISABLED);
+ if (!enabled) {
+ p_operational->enabled = enabled;
+ p_operational->valid = false;
+ return;
+ }
+
+ p_feat = &p_hwfn->p_dcbx_info->operational.features;
+ p_results = &p_hwfn->p_dcbx_info->results;
+
+ val = !!(QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
+ DCBX_CONFIG_VERSION_IEEE);
+ p_operational->ieee = val;
+ val = !!(QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) ==
+ DCBX_CONFIG_VERSION_CEE);
+ p_operational->cee = val;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Version support: ieee %d, cee %d\n",
+ p_operational->ieee, p_operational->cee);
+
+ qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
+ p_feat->app.app_pri_tbl, &p_feat->ets,
+ p_feat->pfc, &params->operational.params,
+ p_operational->ieee);
+ qed_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio, p_results);
+ err = QED_MFW_GET_FIELD(p_feat->app.flags, DCBX_APP_ERROR);
+ p_operational->err = err;
+ p_operational->enabled = enabled;
+ p_operational->valid = true;
+}
+
+static void
+qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_dcbx_get *params)
+{
+ struct lldp_config_params_s *p_local;
+
+ p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE];
+
+ memcpy(params->lldp_local.local_chassis_id, p_local->local_chassis_id,
+ ARRAY_SIZE(p_local->local_chassis_id));
+ memcpy(params->lldp_local.local_port_id, p_local->local_port_id,
+ ARRAY_SIZE(p_local->local_port_id));
+}
+
+static void
+qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_dcbx_get *params)
+{
+ struct lldp_status_params_s *p_remote;
+
+ p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE];
+
+ memcpy(params->lldp_remote.peer_chassis_id, p_remote->peer_chassis_id,
+ ARRAY_SIZE(p_remote->peer_chassis_id));
+ memcpy(params->lldp_remote.peer_port_id, p_remote->peer_port_id,
+ ARRAY_SIZE(p_remote->peer_port_id));
+}
+
+static int
+qed_dcbx_get_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_dcbx_get *p_params,
+ enum qed_mib_read_type type)
+{
+ switch (type) {
+ case QED_DCBX_REMOTE_MIB:
+ qed_dcbx_get_remote_params(p_hwfn, p_ptt, p_params);
+ break;
+ case QED_DCBX_LOCAL_MIB:
+ qed_dcbx_get_local_params(p_hwfn, p_ptt, p_params);
+ break;
+ case QED_DCBX_OPERATIONAL_MIB:
+ qed_dcbx_get_operational_params(p_hwfn, p_ptt, p_params);
+ break;
+ case QED_DCBX_REMOTE_LLDP_MIB:
+ qed_dcbx_get_remote_lldp_params(p_hwfn, p_ptt, p_params);
+ break;
+ case QED_DCBX_LOCAL_LLDP_MIB:
+ qed_dcbx_get_local_lldp_params(p_hwfn, p_ptt, p_params);
+ break;
+ default:
+ DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+#endif
+
+static int
+qed_dcbx_read_local_lldp_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_dcbx_mib_meta_data data;
+ int rc = 0;
+
+ memset(&data, 0, sizeof(data));
+ data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port,
+ lldp_config_params);
+ data.lldp_local = p_hwfn->p_dcbx_info->lldp_local;
+ data.size = sizeof(struct lldp_config_params_s);
+ qed_memcpy_from(p_hwfn, p_ptt, data.lldp_local, data.addr, data.size);
+
+ return rc;
+}
+
+static int
+qed_dcbx_read_remote_lldp_mib(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_mib_read_type type)
+{
+ struct qed_dcbx_mib_meta_data data;
+ int rc = 0;
+
+ memset(&data, 0, sizeof(data));
+ data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port,
+ lldp_status_params);
+ data.lldp_remote = p_hwfn->p_dcbx_info->lldp_remote;
+ data.size = sizeof(struct lldp_status_params_s);
+ rc = qed_dcbx_copy_mib(p_hwfn, p_ptt, &data, type);
+
+ return rc;
+}
+
+static int
+qed_dcbx_read_operational_mib(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum qed_mib_read_type type)
+{
+ struct qed_dcbx_mib_meta_data data;
+ int rc = 0;
+
+ memset(&data, 0, sizeof(data));
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, operational_dcbx_mib);
+ data.mib = &p_hwfn->p_dcbx_info->operational;
+ data.size = sizeof(struct dcbx_mib);
+ rc = qed_dcbx_copy_mib(p_hwfn, p_ptt, &data, type);
+
+ return rc;
+}
+
+static int
+qed_dcbx_read_remote_mib(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, enum qed_mib_read_type type)
+{
+ struct qed_dcbx_mib_meta_data data;
+ int rc = 0;
+
+ memset(&data, 0, sizeof(data));
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, remote_dcbx_mib);
+ data.mib = &p_hwfn->p_dcbx_info->remote;
+ data.size = sizeof(struct dcbx_mib);
+ rc = qed_dcbx_copy_mib(p_hwfn, p_ptt, &data, type);
+
+ return rc;
+}
+
+static int
+qed_dcbx_read_local_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_dcbx_mib_meta_data data;
+ int rc = 0;
+
+ memset(&data, 0, sizeof(data));
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, local_admin_dcbx_mib);
+ data.local_admin = &p_hwfn->p_dcbx_info->local_admin;
+ data.size = sizeof(struct dcbx_local_params);
+ qed_memcpy_from(p_hwfn, p_ptt, data.local_admin, data.addr, data.size);
+
+ return rc;
+}
+
+static int qed_dcbx_read_mib(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, enum qed_mib_read_type type)
+{
+ int rc = -EINVAL;
+
+ switch (type) {
+ case QED_DCBX_OPERATIONAL_MIB:
+ rc = qed_dcbx_read_operational_mib(p_hwfn, p_ptt, type);
+ break;
+ case QED_DCBX_REMOTE_MIB:
+ rc = qed_dcbx_read_remote_mib(p_hwfn, p_ptt, type);
+ break;
+ case QED_DCBX_LOCAL_MIB:
+ rc = qed_dcbx_read_local_mib(p_hwfn, p_ptt);
+ break;
+ case QED_DCBX_REMOTE_LLDP_MIB:
+ rc = qed_dcbx_read_remote_lldp_mib(p_hwfn, p_ptt, type);
+ break;
+ case QED_DCBX_LOCAL_LLDP_MIB:
+ rc = qed_dcbx_read_local_lldp_mib(p_hwfn, p_ptt);
+ break;
+ default:
+ DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type);
+ }
+
+ return rc;
+}
+
+/* Read updated MIB.
+ * Reconfigure QM and invoke PF update ramrod command if operational MIB
+ * change is detected.
+ */
+int
+qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, enum qed_mib_read_type type)
+{
+ int rc = 0;
+
+ rc = qed_dcbx_read_mib(p_hwfn, p_ptt, type);
+ if (rc)
+ return rc;
+
+ if (type == QED_DCBX_OPERATIONAL_MIB) {
+ rc = qed_dcbx_process_mib_info(p_hwfn);
+ if (!rc) {
+ /* reconfigure tcs of QM queues according
+ * to negotiation results
+ */
+ qed_qm_reconf(p_hwfn, p_ptt);
+
+ /* update storm FW with negotiation results */
+ qed_sp_pf_update(p_hwfn);
+ }
+ }
+
+ return rc;
+}
+
+int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn)
+{
+ int rc = 0;
+
+ p_hwfn->p_dcbx_info = kzalloc(sizeof(*p_hwfn->p_dcbx_info), GFP_KERNEL);
+ if (!p_hwfn->p_dcbx_info) {
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate 'struct qed_dcbx_info'\n");
+ rc = -ENOMEM;
+ }
+
+ return rc;
+}
+
+void qed_dcbx_info_free(struct qed_hwfn *p_hwfn,
+ struct qed_dcbx_info *p_dcbx_info)
+{
+ kfree(p_hwfn->p_dcbx_info);
+}
+
+static void qed_dcbx_update_protocol_data(struct protocol_dcb_data *p_data,
+ struct qed_dcbx_results *p_src,
+ enum dcbx_protocol_type type)
+{
+ p_data->dcb_enable_flag = p_src->arr[type].enable;
+ p_data->dcb_priority = p_src->arr[type].priority;
+ p_data->dcb_tc = p_src->arr[type].tc;
+}
+
+/* Set pf update ramrod command params */
+void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
+ struct pf_update_ramrod_data *p_dest)
+{
+ struct protocol_dcb_data *p_dcb_data;
+ bool update_flag = false;
+
+ p_dest->pf_id = p_src->pf_id;
+
+ update_flag = p_src->arr[DCBX_PROTOCOL_FCOE].update;
+ p_dest->update_fcoe_dcb_data_flag = update_flag;
+
+ update_flag = p_src->arr[DCBX_PROTOCOL_ROCE].update;
+ p_dest->update_roce_dcb_data_flag = update_flag;
+ update_flag = p_src->arr[DCBX_PROTOCOL_ROCE_V2].update;
+ p_dest->update_roce_dcb_data_flag = update_flag;
+
+ update_flag = p_src->arr[DCBX_PROTOCOL_ISCSI].update;
+ p_dest->update_iscsi_dcb_data_flag = update_flag;
+ update_flag = p_src->arr[DCBX_PROTOCOL_ETH].update;
+ p_dest->update_eth_dcb_data_flag = update_flag;
+
+ p_dcb_data = &p_dest->fcoe_dcb_data;
+ qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_FCOE);
+ p_dcb_data = &p_dest->roce_dcb_data;
+
+ if (p_src->arr[DCBX_PROTOCOL_ROCE].update)
+ qed_dcbx_update_protocol_data(p_dcb_data, p_src,
+ DCBX_PROTOCOL_ROCE);
+ if (p_src->arr[DCBX_PROTOCOL_ROCE_V2].update)
+ qed_dcbx_update_protocol_data(p_dcb_data, p_src,
+ DCBX_PROTOCOL_ROCE_V2);
+
+ p_dcb_data = &p_dest->iscsi_dcb_data;
+ qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ISCSI);
+ p_dcb_data = &p_dest->eth_dcb_data;
+ qed_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ETH);
+}
+
+#ifdef CONFIG_DCB
+static int qed_dcbx_query_params(struct qed_hwfn *p_hwfn,
+ struct qed_dcbx_get *p_get,
+ enum qed_mib_read_type type)
+{
+ struct qed_ptt *p_ptt;
+ int rc;
+
+ if (IS_VF(p_hwfn->cdev))
+ return -EINVAL;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+
+ rc = qed_dcbx_read_mib(p_hwfn, p_ptt, type);
+ if (rc)
+ goto out;
+
+ rc = qed_dcbx_get_params(p_hwfn, p_ptt, p_get, type);
+
+out:
+ qed_ptt_release(p_hwfn, p_ptt);
+ return rc;
+}
+
+static void
+qed_dcbx_set_pfc_data(struct qed_hwfn *p_hwfn,
+ u32 *pfc, struct qed_dcbx_params *p_params)
+{
+ u8 pfc_map = 0;
+ int i;
+
+ if (p_params->pfc.willing)
+ *pfc |= DCBX_PFC_WILLING_MASK;
+ else
+ *pfc &= ~DCBX_PFC_WILLING_MASK;
+
+ if (p_params->pfc.enabled)
+ *pfc |= DCBX_PFC_ENABLED_MASK;
+ else
+ *pfc &= ~DCBX_PFC_ENABLED_MASK;
+
+ *pfc &= ~DCBX_PFC_CAPS_MASK;
+ *pfc |= (u32)p_params->pfc.max_tc << DCBX_PFC_CAPS_SHIFT;
+
+ for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
+ if (p_params->pfc.prio[i])
+ pfc_map |= BIT(i);
+
+ *pfc &= ~DCBX_PFC_PRI_EN_BITMAP_MASK;
+ *pfc |= (pfc_map << DCBX_PFC_PRI_EN_BITMAP_SHIFT);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_DCB, "pfc = 0x%x\n", *pfc);
+}
+
+static void
+qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn,
+ struct dcbx_ets_feature *p_ets,
+ struct qed_dcbx_params *p_params)
+{
+ u8 *bw_map, *tsa_map;
+ u32 val;
+ int i;
+
+ if (p_params->ets_willing)
+ p_ets->flags |= DCBX_ETS_WILLING_MASK;
+ else
+ p_ets->flags &= ~DCBX_ETS_WILLING_MASK;
+
+ if (p_params->ets_cbs)
+ p_ets->flags |= DCBX_ETS_CBS_MASK;
+ else
+ p_ets->flags &= ~DCBX_ETS_CBS_MASK;
+
+ if (p_params->ets_enabled)
+ p_ets->flags |= DCBX_ETS_ENABLED_MASK;
+ else
+ p_ets->flags &= ~DCBX_ETS_ENABLED_MASK;
+
+ p_ets->flags &= ~DCBX_ETS_MAX_TCS_MASK;
+ p_ets->flags |= (u32)p_params->max_ets_tc << DCBX_ETS_MAX_TCS_SHIFT;
+
+ bw_map = (u8 *)&p_ets->tc_bw_tbl[0];
+ tsa_map = (u8 *)&p_ets->tc_tsa_tbl[0];
+ p_ets->pri_tc_tbl[0] = 0;
+ for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) {
+ bw_map[i] = p_params->ets_tc_bw_tbl[i];
+ tsa_map[i] = p_params->ets_tc_tsa_tbl[i];
+ /* Copy the priority value to the corresponding 4 bits in the
+ * traffic class table.
+ */
+ val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4));
+ p_ets->pri_tc_tbl[0] |= val;
+ }
+ for (i = 0; i < 2; i++) {
+ p_ets->tc_bw_tbl[i] = cpu_to_be32(p_ets->tc_bw_tbl[i]);
+ p_ets->tc_tsa_tbl[i] = cpu_to_be32(p_ets->tc_tsa_tbl[i]);
+ }
+}
+
+static void
+qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn,
+ struct dcbx_app_priority_feature *p_app,
+ struct qed_dcbx_params *p_params, bool ieee)
+{
+ u32 *entry;
+ int i;
+
+ if (p_params->app_willing)
+ p_app->flags |= DCBX_APP_WILLING_MASK;
+ else
+ p_app->flags &= ~DCBX_APP_WILLING_MASK;
+
+ if (p_params->app_valid)
+ p_app->flags |= DCBX_APP_ENABLED_MASK;
+ else
+ p_app->flags &= ~DCBX_APP_ENABLED_MASK;
+
+ p_app->flags &= ~DCBX_APP_NUM_ENTRIES_MASK;
+ p_app->flags |= (u32)p_params->num_app_entries <<
+ DCBX_APP_NUM_ENTRIES_SHIFT;
+
+ for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
+ entry = &p_app->app_pri_tbl[i].entry;
+ *entry = 0;
+ if (ieee) {
+ *entry &= ~(DCBX_APP_SF_IEEE_MASK | DCBX_APP_SF_MASK);
+ switch (p_params->app_entry[i].sf_ieee) {
+ case QED_DCBX_SF_IEEE_ETHTYPE:
+ *entry |= ((u32)DCBX_APP_SF_IEEE_ETHTYPE <<
+ DCBX_APP_SF_IEEE_SHIFT);
+ *entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
+ DCBX_APP_SF_SHIFT);
+ break;
+ case QED_DCBX_SF_IEEE_TCP_PORT:
+ *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_PORT <<
+ DCBX_APP_SF_IEEE_SHIFT);
+ *entry |= ((u32)DCBX_APP_SF_PORT <<
+ DCBX_APP_SF_SHIFT);
+ break;
+ case QED_DCBX_SF_IEEE_UDP_PORT:
+ *entry |= ((u32)DCBX_APP_SF_IEEE_UDP_PORT <<
+ DCBX_APP_SF_IEEE_SHIFT);
+ *entry |= ((u32)DCBX_APP_SF_PORT <<
+ DCBX_APP_SF_SHIFT);
+ break;
+ case QED_DCBX_SF_IEEE_TCP_UDP_PORT:
+ *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT <<
+ DCBX_APP_SF_IEEE_SHIFT);
+ *entry |= ((u32)DCBX_APP_SF_PORT <<
+ DCBX_APP_SF_SHIFT);
+ break;
+ }
+ } else {
+ *entry &= ~DCBX_APP_SF_MASK;
+ if (p_params->app_entry[i].ethtype)
+ *entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
+ DCBX_APP_SF_SHIFT);
+ else
+ *entry |= ((u32)DCBX_APP_SF_PORT <<
+ DCBX_APP_SF_SHIFT);
+ }
+
+ *entry &= ~DCBX_APP_PROTOCOL_ID_MASK;
+ *entry |= ((u32)p_params->app_entry[i].proto_id <<
+ DCBX_APP_PROTOCOL_ID_SHIFT);
+ *entry &= ~DCBX_APP_PRI_MAP_MASK;
+ *entry |= ((u32)(p_params->app_entry[i].prio) <<
+ DCBX_APP_PRI_MAP_SHIFT);
+ }
+}
+
+static void
+qed_dcbx_set_local_params(struct qed_hwfn *p_hwfn,
+ struct dcbx_local_params *local_admin,
+ struct qed_dcbx_set *params)
+{
+ bool ieee = false;
+
+ local_admin->flags = 0;
+ memcpy(&local_admin->features,
+ &p_hwfn->p_dcbx_info->operational.features,
+ sizeof(local_admin->features));
+
+ if (params->enabled) {
+ local_admin->config = params->ver_num;
+ ieee = !!(params->ver_num & DCBX_CONFIG_VERSION_IEEE);
+ } else {
+ local_admin->config = DCBX_CONFIG_VERSION_DISABLED;
+ }
+
+ if (params->override_flags & QED_DCBX_OVERRIDE_PFC_CFG)
+ qed_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc,
+ &params->config.params);
+
+ if (params->override_flags & QED_DCBX_OVERRIDE_ETS_CFG)
+ qed_dcbx_set_ets_data(p_hwfn, &local_admin->features.ets,
+ &params->config.params);
+
+ if (params->override_flags & QED_DCBX_OVERRIDE_APP_CFG)
+ qed_dcbx_set_app_data(p_hwfn, &local_admin->features.app,
+ &params->config.params, ieee);
+}
+
+int qed_dcbx_config_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_dcbx_set *params, bool hw_commit)
+{
+ struct dcbx_local_params local_admin;
+ struct qed_dcbx_mib_meta_data data;
+ u32 resp = 0, param = 0;
+ int rc = 0;
+
+ if (!hw_commit) {
+ memcpy(&p_hwfn->p_dcbx_info->set, params,
+ sizeof(struct qed_dcbx_set));
+ return 0;
+ }
+
+ /* clear set-params cache */
+ memset(&p_hwfn->p_dcbx_info->set, 0, sizeof(p_hwfn->p_dcbx_info->set));
+
+ memset(&local_admin, 0, sizeof(local_admin));
+ qed_dcbx_set_local_params(p_hwfn, &local_admin, params);
+
+ data.addr = p_hwfn->mcp_info->port_addr +
+ offsetof(struct public_port, local_admin_dcbx_mib);
+ data.local_admin = &local_admin;
+ data.size = sizeof(struct dcbx_local_params);
+ qed_memcpy_to(p_hwfn, p_ptt, data.addr, data.local_admin, data.size);
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_DCBX,
+ 1 << DRV_MB_PARAM_LLDP_SEND_SHIFT, &resp, &param);
+ if (rc)
+ DP_NOTICE(p_hwfn, "Failed to send DCBX update request\n");
+
+ return rc;
+}
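Note the two-phase contract here: hw_commit=false only caches the request, while true pushes it to the MFW and clears the cache. The dcbnl set callbacks below pass 0 and rely on setall to flush. A hypothetical staged update (the helper name is invented):

/* Sketch only: assumes an already-acquired PTT and a populated set. */
static int example_stage_and_commit(struct qed_hwfn *hwfn,
				    struct qed_ptt *ptt,
				    struct qed_dcbx_set *set)
{
	int rc;

	set->override_flags |= QED_DCBX_OVERRIDE_PFC_CFG;
	rc = qed_dcbx_config_params(hwfn, ptt, set, false);	/* cache */
	if (rc)
		return rc;

	set->override_flags |= QED_DCBX_OVERRIDE_ETS_CFG;
	return qed_dcbx_config_params(hwfn, ptt, set, true);	/* commit */
}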
+
+int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
+ struct qed_dcbx_set *params)
+{
+ struct qed_dcbx_get *dcbx_info;
+ int rc;
+
+ if (p_hwfn->p_dcbx_info->set.config.valid) {
+ memcpy(params, &p_hwfn->p_dcbx_info->set,
+ sizeof(struct qed_dcbx_set));
+ return 0;
+ }
+
+ dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
+ if (!dcbx_info) {
+ DP_ERR(p_hwfn, "Failed to allocate struct qed_dcbx_info\n");
+ return -ENOMEM;
+ }
+
+ rc = qed_dcbx_query_params(p_hwfn, dcbx_info, QED_DCBX_OPERATIONAL_MIB);
+ if (rc) {
+ kfree(dcbx_info);
+ return rc;
+ }
+
+ p_hwfn->p_dcbx_info->set.override_flags = 0;
+ p_hwfn->p_dcbx_info->set.ver_num = DCBX_CONFIG_VERSION_DISABLED;
+ if (dcbx_info->operational.cee)
+ p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_CEE;
+ if (dcbx_info->operational.ieee)
+ p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_IEEE;
+
+ p_hwfn->p_dcbx_info->set.enabled = dcbx_info->operational.enabled;
+ memcpy(&p_hwfn->p_dcbx_info->set.config.params,
+ &dcbx_info->operational.params,
+ sizeof(struct qed_dcbx_admin_params));
+ p_hwfn->p_dcbx_info->set.config.valid = true;
+
+ memcpy(params, &p_hwfn->p_dcbx_info->set, sizeof(struct qed_dcbx_set));
+
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
+ enum qed_mib_read_type type)
+{
+ struct qed_dcbx_get *dcbx_info;
+
+ dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
+ if (!dcbx_info) {
+ DP_ERR(hwfn->cdev, "Failed to allocate memory for dcbx_info\n");
+ return NULL;
+ }
+
+ if (qed_dcbx_query_params(hwfn, dcbx_info, type)) {
+ kfree(dcbx_info);
+ return NULL;
+ }
+
+ if ((type == QED_DCBX_OPERATIONAL_MIB) &&
+ !dcbx_info->operational.enabled) {
+ DP_INFO(hwfn, "DCBX is not enabled/operational\n");
+ kfree(dcbx_info);
+ return NULL;
+ }
+
+ return dcbx_info;
+}
+
+static u8 qed_dcbnl_getstate(struct qed_dev *cdev)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ bool enabled;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return 0;
+
+ enabled = dcbx_info->operational.enabled;
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "DCB state = %d\n", enabled);
+ kfree(dcbx_info);
+
+ return enabled;
+}
+
+static u8 qed_dcbnl_setstate(struct qed_dev *cdev, u8 state)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "DCB state = %d\n", state);
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return 1;
+
+ dcbx_set.enabled = !!state;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return 1;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc ? 1 : 0;
+}
+
+static void qed_dcbnl_getpgtccfgtx(struct qed_dev *cdev, int tc, u8 *prio_type,
+ u8 *pgid, u8 *bw_pct, u8 *up_map)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "tc = %d\n", tc);
+ *prio_type = *pgid = *bw_pct = *up_map = 0;
+ if (tc < 0 || tc >= QED_MAX_PFC_PRIORITIES) {
+ DP_INFO(hwfn, "Invalid tc %d\n", tc);
+ return;
+ }
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return;
+
+ *pgid = dcbx_info->operational.params.ets_pri_tc_tbl[tc];
+ kfree(dcbx_info);
+}
+
+static void qed_dcbnl_getpgbwgcfgtx(struct qed_dev *cdev, int pgid, u8 *bw_pct)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+
+ *bw_pct = 0;
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "pgid = %d\n", pgid);
+ if (pgid < 0 || pgid >= QED_MAX_PFC_PRIORITIES) {
+ DP_INFO(hwfn, "Invalid pgid %d\n", pgid);
+ return;
+ }
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return;
+
+ *bw_pct = dcbx_info->operational.params.ets_tc_bw_tbl[pgid];
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "bw_pct = %d\n", *bw_pct);
+ kfree(dcbx_info);
+}
+
+static void qed_dcbnl_getpgtccfgrx(struct qed_dev *cdev, int tc, u8 *prio,
+ u8 *bwg_id, u8 *bw_pct, u8 *up_map)
+{
+ DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n");
+ *prio = *bwg_id = *bw_pct = *up_map = 0;
+}
+
+static void qed_dcbnl_getpgbwgcfgrx(struct qed_dev *cdev,
+ int bwg_id, u8 *bw_pct)
+{
+ DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n");
+ *bw_pct = 0;
+}
+
+static void qed_dcbnl_getpfccfg(struct qed_dev *cdev,
+ int priority, u8 *setting)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "priority = %d\n", priority);
+ if (priority < 0 || priority >= QED_MAX_PFC_PRIORITIES) {
+ DP_INFO(hwfn, "Invalid priority %d\n", priority);
+ return;
+ }
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return;
+
+ *setting = dcbx_info->operational.params.pfc.prio[priority];
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "setting = %d\n", *setting);
+ kfree(dcbx_info);
+}
+
+static void qed_dcbnl_setpfccfg(struct qed_dev *cdev, int priority, u8 setting)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "priority = %d setting = %d\n",
+ priority, setting);
+ if (priority < 0 || priority >= QED_MAX_PFC_PRIORITIES) {
+ DP_INFO(hwfn, "Invalid priority %d\n", priority);
+ return;
+ }
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return;
+
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG;
+ dcbx_set.config.params.pfc.prio[priority] = !!setting;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+}
+
+static u8 qed_dcbnl_getcap(struct qed_dev *cdev, int capid, u8 *cap)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ int rc = 0;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "capid = %d\n", capid);
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return 1;
+
+ switch (capid) {
+ case DCB_CAP_ATTR_PG:
+ case DCB_CAP_ATTR_PFC:
+ case DCB_CAP_ATTR_UP2TC:
+ case DCB_CAP_ATTR_GSP:
+ *cap = true;
+ break;
+ case DCB_CAP_ATTR_PG_TCS:
+ case DCB_CAP_ATTR_PFC_TCS:
+ *cap = 0x80;
+ break;
+ case DCB_CAP_ATTR_DCBX:
+ *cap = (DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_CEE |
+ DCB_CAP_DCBX_VER_IEEE);
+ break;
+ default:
+ *cap = false;
+ rc = 1;
+ }
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "id = %d caps = %d\n", capid, *cap);
+ kfree(dcbx_info);
+
+ return rc;
+}
+
+static int qed_dcbnl_getnumtcs(struct qed_dev *cdev, int tcid, u8 *num)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ int rc = 0;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "tcid = %d\n", tcid);
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ switch (tcid) {
+ case DCB_NUMTCS_ATTR_PG:
+ *num = dcbx_info->operational.params.max_ets_tc;
+ break;
+ case DCB_NUMTCS_ATTR_PFC:
+ *num = dcbx_info->operational.params.pfc.max_tc;
+ break;
+ default:
+ rc = -EINVAL;
+ }
+
+ kfree(dcbx_info);
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "numtcs = %d\n", *num);
+
+ return rc;
+}
+
+static u8 qed_dcbnl_getpfcstate(struct qed_dev *cdev)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ bool enabled;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return 0;
+
+ enabled = dcbx_info->operational.params.pfc.enabled;
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "pfc state = %d\n", enabled);
+ kfree(dcbx_info);
+
+ return enabled;
+}
+
+static u8 qed_dcbnl_getdcbx(struct qed_dev *cdev)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ u8 mode = 0;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return 0;
+
+ if (dcbx_info->operational.enabled)
+ mode |= DCB_CAP_DCBX_LLD_MANAGED;
+ if (dcbx_info->operational.ieee)
+ mode |= DCB_CAP_DCBX_VER_IEEE;
+ if (dcbx_info->operational.cee)
+ mode |= DCB_CAP_DCBX_VER_CEE;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "dcb mode = %d\n", mode);
+ kfree(dcbx_info);
+
+ return mode;
+}
+
+static void qed_dcbnl_setpgtccfgtx(struct qed_dev *cdev,
+ int tc,
+ u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB,
+ "tc = %d pri_type = %d pgid = %d bw_pct = %d up_map = %d\n",
+ tc, pri_type, pgid, bw_pct, up_map);
+
+ if (tc < 0 || tc >= QED_MAX_PFC_PRIORITIES) {
+ DP_INFO(hwfn, "Invalid tc %d\n", tc);
+ return;
+ }
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return;
+
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG;
+ dcbx_set.config.params.ets_pri_tc_tbl[tc] = pgid;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+}
+
+static void qed_dcbnl_setpgtccfgrx(struct qed_dev *cdev, int prio,
+ u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map)
+{
+ DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n");
+}
+
+static void qed_dcbnl_setpgbwgcfgtx(struct qed_dev *cdev, int pgid, u8 bw_pct)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "pgid = %d bw_pct = %d\n", pgid, bw_pct);
+ if (pgid < 0 || pgid >= QED_MAX_PFC_PRIORITIES) {
+ DP_INFO(hwfn, "Invalid pgid %d\n", pgid);
+ return;
+ }
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return;
+
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG;
+ dcbx_set.config.params.ets_tc_bw_tbl[pgid] = bw_pct;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+}
+
+static void qed_dcbnl_setpgbwgcfgrx(struct qed_dev *cdev, int pgid, u8 bw_pct)
+{
+ DP_INFO(QED_LEADING_HWFN(cdev), "Rx ETS is not supported\n");
+}
+
+static u8 qed_dcbnl_setall(struct qed_dev *cdev)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return 1;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return 1;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 1);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
+static int qed_dcbnl_setnumtcs(struct qed_dev *cdev, int tcid, u8 num)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "tcid = %d num = %d\n", tcid, num);
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return 1;
+
+ switch (tcid) {
+ case DCB_NUMTCS_ATTR_PG:
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG;
+ dcbx_set.config.params.max_ets_tc = num;
+ break;
+ case DCB_NUMTCS_ATTR_PFC:
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG;
+ dcbx_set.config.params.pfc.max_tc = num;
+ break;
+ default:
+ DP_INFO(hwfn, "Invalid tcid %d\n", tcid);
+ return -EINVAL;
+ }
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EINVAL;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return 0;
+}
+
+static void qed_dcbnl_setpfcstate(struct qed_dev *cdev, u8 state)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "new state = %d\n", state);
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return;
+
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG;
+ dcbx_set.config.params.pfc.enabled = !!state;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+}
+
+static int qed_dcbnl_getapp(struct qed_dev *cdev, u8 idtype, u16 idval)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ struct qed_app_entry *entry;
+ bool ethtype;
+ u8 prio = 0;
+ int i;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ ethtype = !!(idtype == DCB_APP_IDTYPE_ETHTYPE);
+ for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) {
+ entry = &dcbx_info->operational.params.app_entry[i];
+ if ((entry->ethtype == ethtype) && (entry->proto_id == idval)) {
+ prio = entry->prio;
+ break;
+ }
+ }
+
+ if (i == QED_DCBX_MAX_APP_PROTOCOL) {
+ DP_ERR(cdev, "App entry (%d, %d) not found\n", idtype, idval);
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ kfree(dcbx_info);
+
+ return prio;
+}
+
+static int qed_dcbnl_setapp(struct qed_dev *cdev,
+ u8 idtype, u16 idval, u8 pri_map)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_app_entry *entry;
+ struct qed_ptt *ptt;
+ bool ethtype;
+ int rc, i;
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return -EINVAL;
+
+ ethtype = !!(idtype == DCB_APP_IDTYPE_ETHTYPE);
+ for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) {
+ entry = &dcbx_set.config.params.app_entry[i];
+ if ((entry->ethtype == ethtype) && (entry->proto_id == idval))
+ break;
+ /* First empty slot */
+ if (!entry->proto_id) {
+ dcbx_set.config.params.num_app_entries++;
+ break;
+ }
+ }
+
+ if (i == QED_DCBX_MAX_APP_PROTOCOL) {
+ DP_ERR(cdev, "App table is full\n");
+ return -EBUSY;
+ }
+
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_APP_CFG;
+ dcbx_set.config.params.app_entry[i].ethtype = ethtype;
+ dcbx_set.config.params.app_entry[i].proto_id = idval;
+ dcbx_set.config.params.app_entry[i].prio = pri_map;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EBUSY;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
+static u8 qed_dcbnl_setdcbx(struct qed_dev *cdev, u8 mode)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "new mode = %x\n", mode);
+
+ if (!(mode & DCB_CAP_DCBX_VER_IEEE) && !(mode & DCB_CAP_DCBX_VER_CEE)) {
+ DP_INFO(hwfn, "Allowed mode is cee, ieee or both\n");
+ return 1;
+ }
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return 1;
+
+ dcbx_set.ver_num = 0;
+ if (mode & DCB_CAP_DCBX_VER_CEE) {
+ dcbx_set.ver_num |= DCBX_CONFIG_VERSION_CEE;
+ dcbx_set.enabled = true;
+ }
+
+ if (mode & DCB_CAP_DCBX_VER_IEEE) {
+ dcbx_set.ver_num |= DCBX_CONFIG_VERSION_IEEE;
+ dcbx_set.enabled = true;
+ }
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return 1;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc ? 1 : 0;
+}
+
+static u8 qed_dcbnl_getfeatcfg(struct qed_dev *cdev, int featid, u8 *flags)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "Feature id = %d\n", featid);
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return 1;
+
+ *flags = 0;
+ switch (featid) {
+ case DCB_FEATCFG_ATTR_PG:
+ if (dcbx_info->operational.params.ets_enabled)
+ *flags = DCB_FEATCFG_ENABLE;
+ else
+ *flags = DCB_FEATCFG_ERROR;
+ break;
+ case DCB_FEATCFG_ATTR_PFC:
+ if (dcbx_info->operational.params.pfc.enabled)
+ *flags = DCB_FEATCFG_ENABLE;
+ else
+ *flags = DCB_FEATCFG_ERROR;
+ break;
+ case DCB_FEATCFG_ATTR_APP:
+ if (dcbx_info->operational.params.app_valid)
+ *flags = DCB_FEATCFG_ENABLE;
+ else
+ *flags = DCB_FEATCFG_ERROR;
+ break;
+ default:
+ DP_INFO(hwfn, "Invalid feature-ID %d\n", featid);
+ kfree(dcbx_info);
+ return 1;
+ }
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "flags = %d\n", *flags);
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static u8 qed_dcbnl_setfeatcfg(struct qed_dev *cdev, int featid, u8 flags)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_set dcbx_set;
+ bool enabled, willing;
+ struct qed_ptt *ptt;
+ int rc;
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "featid = %d flags = %d\n",
+ featid, flags);
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return 1;
+
+ enabled = !!(flags & DCB_FEATCFG_ENABLE);
+ willing = !!(flags & DCB_FEATCFG_WILLING);
+ switch (featid) {
+ case DCB_FEATCFG_ATTR_PG:
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG;
+ dcbx_set.config.params.ets_enabled = enabled;
+ dcbx_set.config.params.ets_willing = willing;
+ break;
+ case DCB_FEATCFG_ATTR_PFC:
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG;
+ dcbx_set.config.params.pfc.enabled = enabled;
+ dcbx_set.config.params.pfc.willing = willing;
+ break;
+ case DCB_FEATCFG_ATTR_APP:
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_APP_CFG;
+ dcbx_set.config.params.app_willing = willing;
+ break;
+ default:
+ DP_INFO(hwfn, "Invalid feature-ID %d\n", featid);
+ return 1;
+ }
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return 1;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc ? 1 : 0;
+}
+
+static int qed_dcbnl_peer_getappinfo(struct qed_dev *cdev,
+ struct dcb_peer_app_info *info,
+ u16 *app_count)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ info->willing = dcbx_info->remote.params.app_willing;
+ info->error = dcbx_info->remote.params.app_error;
+ *app_count = dcbx_info->remote.params.num_app_entries;
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static int qed_dcbnl_peer_getapptable(struct qed_dev *cdev,
+ struct dcb_app *table)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ int i;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ for (i = 0; i < dcbx_info->remote.params.num_app_entries; i++) {
+ if (dcbx_info->remote.params.app_entry[i].ethtype)
+ table[i].selector = DCB_APP_IDTYPE_ETHTYPE;
+ else
+ table[i].selector = DCB_APP_IDTYPE_PORTNUM;
+ table[i].priority = dcbx_info->remote.params.app_entry[i].prio;
+ table[i].protocol =
+ dcbx_info->remote.params.app_entry[i].proto_id;
+ }
+
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static int qed_dcbnl_cee_peer_getpfc(struct qed_dev *cdev, struct cee_pfc *pfc)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ int i;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
+ if (dcbx_info->remote.params.pfc.prio[i])
+ pfc->pfc_en |= BIT(i);
+
+ pfc->tcs_supported = dcbx_info->remote.params.pfc.max_tc;
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "pfc state = %d tcs_supported = %d\n",
+ pfc->pfc_en, pfc->tcs_supported);
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static int qed_dcbnl_cee_peer_getpg(struct qed_dev *cdev, struct cee_pg *pg)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ int i;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_REMOTE_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ pg->willing = dcbx_info->remote.params.ets_willing;
+ for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) {
+ pg->pg_bw[i] = dcbx_info->remote.params.ets_tc_bw_tbl[i];
+ pg->prio_pg[i] = dcbx_info->remote.params.ets_pri_tc_tbl[i];
+ }
+
+ DP_VERBOSE(hwfn, QED_MSG_DCB, "willing = %d", pg->willing);
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static int qed_dcbnl_get_ieee_pfc(struct qed_dev *cdev,
+ struct ieee_pfc *pfc, bool remote)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_params *params;
+ struct qed_dcbx_get *dcbx_info;
+ int rc, i;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ if (!dcbx_info->operational.ieee) {
+ DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ if (remote) {
+ memset(dcbx_info, 0, sizeof(*dcbx_info));
+ rc = qed_dcbx_query_params(hwfn, dcbx_info,
+ QED_DCBX_REMOTE_MIB);
+ if (rc) {
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ params = &dcbx_info->remote.params;
+ } else {
+ params = &dcbx_info->operational.params;
+ }
+
+ pfc->pfc_cap = params->pfc.max_tc;
+ pfc->pfc_en = 0;
+ for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
+ if (params->pfc.prio[i])
+ pfc->pfc_en |= BIT(i);
+
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static int qed_dcbnl_ieee_getpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
+{
+ return qed_dcbnl_get_ieee_pfc(cdev, pfc, false);
+}
+
+static int qed_dcbnl_ieee_setpfc(struct qed_dev *cdev, struct ieee_pfc *pfc)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc, i;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ if (!dcbx_info->operational.ieee) {
+ DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ kfree(dcbx_info);
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return -EINVAL;
+
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_PFC_CFG;
+ for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++)
+ dcbx_set.config.params.pfc.prio[i] = !!(pfc->pfc_en & BIT(i));
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EINVAL;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
+static int qed_dcbnl_get_ieee_ets(struct qed_dev *cdev,
+ struct ieee_ets *ets, bool remote)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ struct qed_dcbx_params *params;
+ int rc;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ if (!dcbx_info->operational.ieee) {
+ DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ if (remote) {
+ memset(dcbx_info, 0, sizeof(*dcbx_info));
+ rc = qed_dcbx_query_params(hwfn, dcbx_info,
+ QED_DCBX_REMOTE_MIB);
+ if (rc) {
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ params = &dcbx_info->remote.params;
+ } else {
+ params = &dcbx_info->operational.params;
+ }
+
+ ets->ets_cap = params->max_ets_tc;
+ ets->willing = params->ets_willing;
+ ets->cbs = params->ets_cbs;
+ memcpy(ets->tc_tx_bw, params->ets_tc_bw_tbl, sizeof(ets->tc_tx_bw));
+ memcpy(ets->tc_tsa, params->ets_tc_tsa_tbl, sizeof(ets->tc_tsa));
+ memcpy(ets->prio_tc, params->ets_pri_tc_tbl, sizeof(ets->prio_tc));
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static int qed_dcbnl_ieee_getets(struct qed_dev *cdev, struct ieee_ets *ets)
+{
+ return qed_dcbnl_get_ieee_ets(cdev, ets, false);
+}
+
+static int qed_dcbnl_ieee_setets(struct qed_dev *cdev, struct ieee_ets *ets)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ struct qed_dcbx_set dcbx_set;
+ struct qed_ptt *ptt;
+ int rc;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ if (!dcbx_info->operational.ieee) {
+ DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ kfree(dcbx_info);
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return -EINVAL;
+
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_ETS_CFG;
+ dcbx_set.config.params.max_ets_tc = ets->ets_cap;
+ dcbx_set.config.params.ets_willing = ets->willing;
+ dcbx_set.config.params.ets_cbs = ets->cbs;
+ memcpy(dcbx_set.config.params.ets_tc_bw_tbl, ets->tc_tx_bw,
+ sizeof(ets->tc_tx_bw));
+ memcpy(dcbx_set.config.params.ets_tc_tsa_tbl, ets->tc_tsa,
+ sizeof(ets->tc_tsa));
+ memcpy(dcbx_set.config.params.ets_pri_tc_tbl, ets->prio_tc,
+ sizeof(ets->prio_tc));
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EINVAL;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
+static int qed_dcbnl_ieee_peer_getets(struct qed_dev *cdev,
+ struct ieee_ets *ets)
+{
+ return qed_dcbnl_get_ieee_ets(cdev, ets, true);
+}
+
+static int qed_dcbnl_ieee_peer_getpfc(struct qed_dev *cdev,
+ struct ieee_pfc *pfc)
+{
+ return qed_dcbnl_get_ieee_pfc(cdev, pfc, true);
+}
+
+static int qed_dcbnl_ieee_getapp(struct qed_dev *cdev, struct dcb_app *app)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ struct qed_app_entry *entry;
+ bool ethtype;
+ u8 prio = 0;
+ int i;
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ if (!dcbx_info->operational.ieee) {
+ DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ /* ieee defines the selector field value for ethertype to be 1 */
+ ethtype = !!((app->selector - 1) == DCB_APP_IDTYPE_ETHTYPE);
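+ /* i.e. IEEE_8021QAZ_APP_SEL_ETHERTYPE (1) - 1 equals the CEE
+ * DCB_APP_IDTYPE_ETHTYPE value (0), so only ethertype entries pass
+ * this test (illustrative note).
+ */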
+ for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) {
+ entry = &dcbx_info->operational.params.app_entry[i];
+ if ((entry->ethtype == ethtype) &&
+ (entry->proto_id == app->protocol)) {
+ prio = entry->prio;
+ break;
+ }
+ }
+
+ if (i == QED_DCBX_MAX_APP_PROTOCOL) {
+ DP_ERR(cdev, "App entry (%d, %d) not found\n", app->selector,
+ app->protocol);
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
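+ /* entry->prio holds a priority bitmap; e.g. a stored BIT(3) = 0x08
+ * decodes to priority 3 via ffs() - 1 (illustrative note).
+ */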
+ app->priority = ffs(prio) - 1;
+
+ kfree(dcbx_info);
+
+ return 0;
+}
+
+static int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_dcbx_get *dcbx_info;
+ struct qed_dcbx_set dcbx_set;
+ struct qed_app_entry *entry;
+ struct qed_ptt *ptt;
+ bool ethtype;
+ int rc, i;
+
+ if (app->priority >= QED_MAX_PFC_PRIORITIES) {
+ DP_INFO(hwfn, "Invalid priority %d\n", app->priority);
+ return -EINVAL;
+ }
+
+ dcbx_info = qed_dcbnl_get_dcbx(hwfn, QED_DCBX_OPERATIONAL_MIB);
+ if (!dcbx_info)
+ return -EINVAL;
+
+ if (!dcbx_info->operational.ieee) {
+ DP_INFO(hwfn, "DCBX is not enabled/operational in IEEE mode\n");
+ kfree(dcbx_info);
+ return -EINVAL;
+ }
+
+ kfree(dcbx_info);
+
+ memset(&dcbx_set, 0, sizeof(dcbx_set));
+ rc = qed_dcbx_get_config_params(hwfn, &dcbx_set);
+ if (rc)
+ return -EINVAL;
+
+ /* ieee defines the selector field value for ethertype to be 1 */
+ ethtype = !!((app->selector - 1) == DCB_APP_IDTYPE_ETHTYPE);
+ for (i = 0; i < QED_DCBX_MAX_APP_PROTOCOL; i++) {
+ entry = &dcbx_set.config.params.app_entry[i];
+ if ((entry->ethtype == ethtype) &&
+ (entry->proto_id == app->protocol))
+ break;
+ /* First empty slot */
+ if (!entry->proto_id) {
+ dcbx_set.config.params.num_app_entries++;
+ break;
+ }
+ }
+
+ if (i == QED_DCBX_MAX_APP_PROTOCOL) {
+ DP_ERR(cdev, "App table is full\n");
+ return -EBUSY;
+ }
+
+ dcbx_set.override_flags |= QED_DCBX_OVERRIDE_APP_CFG;
+ dcbx_set.config.params.app_entry[i].ethtype = ethtype;
+ dcbx_set.config.params.app_entry[i].proto_id = app->protocol;
+ dcbx_set.config.params.app_entry[i].prio = BIT(app->priority);
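+ /* Stored as a one-hot bitmap: priority 3 becomes BIT(3) = 0x08, the
+ * inverse of the ffs() - 1 decode in qed_dcbnl_ieee_getapp()
+ * (illustrative note).
+ */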
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EBUSY;
+
+ rc = qed_dcbx_config_params(hwfn, ptt, &dcbx_set, 0);
+
+ qed_ptt_release(hwfn, ptt);
+
+ return rc;
+}
+
+const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass = {
+ .getstate = qed_dcbnl_getstate,
+ .setstate = qed_dcbnl_setstate,
+ .getpgtccfgtx = qed_dcbnl_getpgtccfgtx,
+ .getpgbwgcfgtx = qed_dcbnl_getpgbwgcfgtx,
+ .getpgtccfgrx = qed_dcbnl_getpgtccfgrx,
+ .getpgbwgcfgrx = qed_dcbnl_getpgbwgcfgrx,
+ .getpfccfg = qed_dcbnl_getpfccfg,
+ .setpfccfg = qed_dcbnl_setpfccfg,
+ .getcap = qed_dcbnl_getcap,
+ .getnumtcs = qed_dcbnl_getnumtcs,
+ .getpfcstate = qed_dcbnl_getpfcstate,
+ .getdcbx = qed_dcbnl_getdcbx,
+ .setpgtccfgtx = qed_dcbnl_setpgtccfgtx,
+ .setpgtccfgrx = qed_dcbnl_setpgtccfgrx,
+ .setpgbwgcfgtx = qed_dcbnl_setpgbwgcfgtx,
+ .setpgbwgcfgrx = qed_dcbnl_setpgbwgcfgrx,
+ .setall = qed_dcbnl_setall,
+ .setnumtcs = qed_dcbnl_setnumtcs,
+ .setpfcstate = qed_dcbnl_setpfcstate,
+ .setapp = qed_dcbnl_setapp,
+ .setdcbx = qed_dcbnl_setdcbx,
+ .setfeatcfg = qed_dcbnl_setfeatcfg,
+ .getfeatcfg = qed_dcbnl_getfeatcfg,
+ .getapp = qed_dcbnl_getapp,
+ .peer_getappinfo = qed_dcbnl_peer_getappinfo,
+ .peer_getapptable = qed_dcbnl_peer_getapptable,
+ .cee_peer_getpfc = qed_dcbnl_cee_peer_getpfc,
+ .cee_peer_getpg = qed_dcbnl_cee_peer_getpg,
+ .ieee_getpfc = qed_dcbnl_ieee_getpfc,
+ .ieee_setpfc = qed_dcbnl_ieee_setpfc,
+ .ieee_getets = qed_dcbnl_ieee_getets,
+ .ieee_setets = qed_dcbnl_ieee_setets,
+ .ieee_peer_getpfc = qed_dcbnl_ieee_peer_getpfc,
+ .ieee_peer_getets = qed_dcbnl_ieee_peer_getets,
+ .ieee_getapp = qed_dcbnl_ieee_getapp,
+ .ieee_setapp = qed_dcbnl_ieee_setapp,
+};
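+
+/* For context, a minimal sketch of how a protocol driver could forward one
+ * of the kernel dcbnl callbacks into this ops table. The qede_dev/edev
+ * names below are assumptions for illustration, not part of this patch:
+ *
+ *	static u8 qede_dcbnl_getstate(struct net_device *netdev)
+ *	{
+ *		struct qede_dev *edev = netdev_priv(netdev);
+ *
+ *		return edev->ops->dcb->getstate(edev->cdev);
+ *	}
+ */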
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
new file mode 100644
index 000000000000..9ba681643d05
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
@@ -0,0 +1,108 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_DCBX_H
+#define _QED_DCBX_H
+#include <linux/types.h>
+#include <linux/slab.h>
+#include "qed.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+
+#define DCBX_CONFIG_MAX_APP_PROTOCOL 4
+
+enum qed_mib_read_type {
+ QED_DCBX_OPERATIONAL_MIB,
+ QED_DCBX_REMOTE_MIB,
+ QED_DCBX_LOCAL_MIB,
+ QED_DCBX_REMOTE_LLDP_MIB,
+ QED_DCBX_LOCAL_LLDP_MIB
+};
+
+struct qed_dcbx_app_data {
+ bool enable; /* DCB enabled */
+ bool update; /* Update indication */
+ u8 priority; /* Priority */
+ u8 tc; /* Traffic Class */
+};
+
+#ifdef CONFIG_DCB
+#define QED_DCBX_VERSION_DISABLED 0
+#define QED_DCBX_VERSION_IEEE 1
+#define QED_DCBX_VERSION_CEE 2
+
+struct qed_dcbx_set {
+#define QED_DCBX_OVERRIDE_STATE BIT(0)
+#define QED_DCBX_OVERRIDE_PFC_CFG BIT(1)
+#define QED_DCBX_OVERRIDE_ETS_CFG BIT(2)
+#define QED_DCBX_OVERRIDE_APP_CFG BIT(3)
+#define QED_DCBX_OVERRIDE_DSCP_CFG BIT(4)
+ u32 override_flags;
+ bool enabled;
+ struct qed_dcbx_admin_params config;
+ u32 ver_num;
+};
+#endif
+
+struct qed_dcbx_results {
+ bool dcbx_enabled;
+ u8 pf_id;
+ struct qed_dcbx_app_data arr[DCBX_MAX_PROTOCOL_TYPE];
+};
+
+struct qed_dcbx_app_metadata {
+ enum dcbx_protocol_type id;
+ char *name;
+ enum qed_pci_personality personality;
+};
+
+#define QED_MFW_GET_FIELD(name, field) \
+ (((name) & (field ## _MASK)) >> (field ## _SHIFT))
+
+struct qed_dcbx_info {
+ struct lldp_status_params_s lldp_remote[LLDP_MAX_LLDP_AGENTS];
+ struct lldp_config_params_s lldp_local[LLDP_MAX_LLDP_AGENTS];
+ struct dcbx_local_params local_admin;
+ struct qed_dcbx_results results;
+ struct dcbx_mib operational;
+ struct dcbx_mib remote;
+#ifdef CONFIG_DCB
+ struct qed_dcbx_set set;
+#endif
+ u8 dcbx_cap;
+};
+
+struct qed_dcbx_mib_meta_data {
+ struct lldp_config_params_s *lldp_local;
+ struct lldp_status_params_s *lldp_remote;
+ struct dcbx_local_params *local_admin;
+ struct dcbx_mib *mib;
+ size_t size;
+ u32 addr;
+};
+
+#ifdef CONFIG_DCB
+int qed_dcbx_get_config_params(struct qed_hwfn *, struct qed_dcbx_set *);
+
+int qed_dcbx_config_params(struct qed_hwfn *,
+ struct qed_ptt *, struct qed_dcbx_set *, bool);
+#endif
+
+/* QED local interface routines */
+int
+qed_dcbx_mib_update_event(struct qed_hwfn *,
+ struct qed_ptt *, enum qed_mib_read_type);
+
+int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn);
+void qed_dcbx_info_free(struct qed_hwfn *, struct qed_dcbx_info *);
+void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
+ struct pf_update_ramrod_data *p_dest);
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index b7d100f6bd6f..0e4f4a9306b5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -17,11 +17,13 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_if.h>
#include "qed.h"
#include "qed_cxt.h"
+#include "qed_dcbx.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
@@ -30,6 +32,11 @@
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
+#include "qed_sriov.h"
+#include "qed_vf.h"
+
+static spinlock_t qm_lock;
+static bool qm_lock_init;
/* API common to all protocols */
enum BAR_ID {
@@ -40,10 +47,14 @@ enum BAR_ID {
static u32 qed_hw_bar_size(struct qed_hwfn *p_hwfn,
enum BAR_ID bar_id)
{
- u32 bar_reg = (bar_id == BAR_ID_0 ?
- PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
- u32 val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
+ u32 bar_reg = (bar_id == BAR_ID_0 ?
+ PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
+ u32 val;
+
+ if (IS_VF(p_hwfn->cdev))
+ return 1 << 17;
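+ /* VFs use a fixed 1 << 17 = 128KB; for PFs the register value decodes
+ * as 1 << (val + 15), e.g. val = 2 also yields 128KB (illustrative
+ * note).
+ */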
+ val = qed_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
if (val)
return 1 << (val + 15);
@@ -105,12 +116,17 @@ static void qed_qm_info_free(struct qed_hwfn *p_hwfn)
qm_info->qm_vport_params = NULL;
kfree(qm_info->qm_port_params);
qm_info->qm_port_params = NULL;
+ kfree(qm_info->wfq_data);
+ qm_info->wfq_data = NULL;
}
void qed_resc_free(struct qed_dev *cdev)
{
int i;
+ if (IS_VF(cdev))
+ return;
+
kfree(cdev->fw_data);
cdev->fw_data = NULL;
@@ -134,22 +150,54 @@ void qed_resc_free(struct qed_dev *cdev)
qed_eq_free(p_hwfn, p_hwfn->p_eq);
qed_consq_free(p_hwfn, p_hwfn->p_consq);
qed_int_free(p_hwfn);
+ qed_iov_free(p_hwfn);
qed_dmae_info_free(p_hwfn);
+ qed_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
}
}
-static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
+static int qed_init_qm_info(struct qed_hwfn *p_hwfn, bool b_sleepable)
{
+ u8 num_vports, vf_offset = 0, i, vport_id, num_ports, curr_queue = 0;
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
struct init_qm_port_params *p_qm_port;
- u8 num_vports, i, vport_id, num_ports;
+ bool init_rdma_offload_pq = false;
+ bool init_pure_ack_pq = false;
+ bool init_ooo_pq = false;
u16 num_pqs, multi_cos_tcs = 1;
-
+ u8 pf_wfq = qm_info->pf_wfq;
+ u32 pf_rl = qm_info->pf_rl;
+ u16 num_pf_rls = 0;
+ u16 num_vfs = 0;
+
+#ifdef CONFIG_QED_SRIOV
+ if (p_hwfn->cdev->p_iov_info)
+ num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
+#endif
memset(qm_info, 0, sizeof(*qm_info));
- num_pqs = multi_cos_tcs + 1; /* The '1' is for pure-LB */
+ num_pqs = multi_cos_tcs + num_vfs + 1; /* The '1' is for pure-LB */
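+ /* e.g. (illustrative): with multi_cos_tcs = 1 and 16 VFs this starts
+ * at 18 PQs, before the optional RoCE/iSCSI additions below.
+ */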
num_vports = (u8)RESC_NUM(p_hwfn, QED_VPORT);
+ if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
+ num_pqs++; /* for RoCE queue */
+ init_rdma_offload_pq = true;
+ /* we subtract num_vfs because each VF requires a rate limiter,
+ * and one more for the default rate limiter
+ */
+ if (p_hwfn->pf_params.rdma_pf_params.enable_dcqcn)
+ num_pf_rls = RESC_NUM(p_hwfn, QED_RL) - num_vfs - 1;
+
+ num_pqs += num_pf_rls;
+ qm_info->num_pf_rls = (u8) num_pf_rls;
+ }
+
+ if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
+ num_pqs += 2; /* for iSCSI pure-ACK / OOO queue */
+ init_pure_ack_pq = true;
+ init_ooo_pq = true;
+ }
+
/* Sanity checking that setup requires legal number of resources */
if (num_pqs > RESC_NUM(p_hwfn, QED_PQ)) {
DP_ERR(p_hwfn,
@@ -160,40 +208,107 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
/* PQs will be arranged as follows: First per-TC PQ then pure-LB queue.
*/
- qm_info->qm_pq_params = kzalloc(sizeof(*qm_info->qm_pq_params) *
- num_pqs, GFP_KERNEL);
+ qm_info->qm_pq_params = kcalloc(num_pqs,
+ sizeof(struct init_qm_pq_params),
+ b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
if (!qm_info->qm_pq_params)
goto alloc_err;
- qm_info->qm_vport_params = kzalloc(sizeof(*qm_info->qm_vport_params) *
- num_vports, GFP_KERNEL);
+ qm_info->qm_vport_params = kcalloc(num_vports,
+ sizeof(struct init_qm_vport_params),
+ b_sleepable ? GFP_KERNEL
+ : GFP_ATOMIC);
if (!qm_info->qm_vport_params)
goto alloc_err;
- qm_info->qm_port_params = kzalloc(sizeof(*qm_info->qm_port_params) *
- MAX_NUM_PORTS, GFP_KERNEL);
+ qm_info->qm_port_params = kcalloc(MAX_NUM_PORTS,
+ sizeof(struct init_qm_port_params),
+ b_sleepable ? GFP_KERNEL
+ : GFP_ATOMIC);
if (!qm_info->qm_port_params)
goto alloc_err;
+ qm_info->wfq_data = kcalloc(num_vports, sizeof(struct qed_wfq_data),
+ b_sleepable ? GFP_KERNEL : GFP_ATOMIC);
+ if (!qm_info->wfq_data)
+ goto alloc_err;
+
vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
+ /* First init rate limited queues */
+ for (curr_queue = 0; curr_queue < num_pf_rls; curr_queue++) {
+ qm_info->qm_pq_params[curr_queue].vport_id = vport_id++;
+ qm_info->qm_pq_params[curr_queue].tc_id =
+ p_hwfn->hw_info.non_offload_tc;
+ qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+ qm_info->qm_pq_params[curr_queue].rl_valid = 1;
+ }
+
/* First init per-TC PQs */
for (i = 0; i < multi_cos_tcs; i++) {
- struct init_qm_pq_params *params = &qm_info->qm_pq_params[i];
-
- params->vport_id = vport_id;
- params->tc_id = p_hwfn->hw_info.non_offload_tc;
- params->wrr_group = 1;
+ struct init_qm_pq_params *params =
+ &qm_info->qm_pq_params[curr_queue++];
+
+ if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE ||
+ p_hwfn->hw_info.personality == QED_PCI_ETH) {
+ params->vport_id = vport_id;
+ params->tc_id = p_hwfn->hw_info.non_offload_tc;
+ params->wrr_group = 1;
+ } else {
+ params->vport_id = vport_id;
+ params->tc_id = p_hwfn->hw_info.offload_tc;
+ params->wrr_group = 1;
+ }
}
/* Then init pure-LB PQ */
- qm_info->pure_lb_pq = i;
- qm_info->qm_pq_params[i].vport_id = (u8)RESC_START(p_hwfn, QED_VPORT);
- qm_info->qm_pq_params[i].tc_id = PURE_LB_TC;
- qm_info->qm_pq_params[i].wrr_group = 1;
- i++;
+ qm_info->pure_lb_pq = curr_queue;
+ qm_info->qm_pq_params[curr_queue].vport_id =
+ (u8) RESC_START(p_hwfn, QED_VPORT);
+ qm_info->qm_pq_params[curr_queue].tc_id = PURE_LB_TC;
+ qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+ curr_queue++;
qm_info->offload_pq = 0;
+ if (init_rdma_offload_pq) {
+ qm_info->offload_pq = curr_queue;
+ qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
+ qm_info->qm_pq_params[curr_queue].tc_id =
+ p_hwfn->hw_info.offload_tc;
+ qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+ curr_queue++;
+ }
+
+ if (init_pure_ack_pq) {
+ qm_info->pure_ack_pq = curr_queue;
+ qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
+ qm_info->qm_pq_params[curr_queue].tc_id =
+ p_hwfn->hw_info.offload_tc;
+ qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+ curr_queue++;
+ }
+
+ if (init_ooo_pq) {
+ qm_info->ooo_pq = curr_queue;
+ qm_info->qm_pq_params[curr_queue].vport_id = vport_id;
+ qm_info->qm_pq_params[curr_queue].tc_id = DCBX_ISCSI_OOO_TC;
+ qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+ curr_queue++;
+ }
+
+ /* Then init per-VF PQs */
+ vf_offset = curr_queue;
+ for (i = 0; i < num_vfs; i++) {
+ /* First vport is used by the PF */
+ qm_info->qm_pq_params[curr_queue].vport_id = vport_id + i + 1;
+ qm_info->qm_pq_params[curr_queue].tc_id =
+ p_hwfn->hw_info.non_offload_tc;
+ qm_info->qm_pq_params[curr_queue].wrr_group = 1;
+ qm_info->qm_pq_params[curr_queue].rl_valid = 1;
+ curr_queue++;
+ }
+
+ qm_info->vf_queues_offset = vf_offset;
qm_info->num_pqs = num_pqs;
qm_info->num_vports = num_vports;
@@ -202,7 +317,10 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
for (i = 0; i < num_ports; i++) {
p_qm_port = &qm_info->qm_port_params[i];
p_qm_port->active = 1;
- p_qm_port->num_active_phys_tcs = 4;
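+		/* Bitmap of active physical TCs: 0x7 enables TCs 0-2 on
+		 * 4-port devices, 0x9f enables TCs 0-4 and 7 (illustrative
+		 * decoding of the constants).
+		 */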
+ if (num_ports == 4)
+ p_qm_port->active_phys_tcs = 0x7;
+ else
+ p_qm_port->active_phys_tcs = 0x9f;
p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
}
@@ -211,29 +329,91 @@ static int qed_init_qm_info(struct qed_hwfn *p_hwfn)
qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
- qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT);
+ qm_info->num_vf_pqs = num_vfs;
+ qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT);
+
+ for (i = 0; i < qm_info->num_vports; i++)
+ qm_info->qm_vport_params[i].vport_wfq = 1;
- qm_info->pf_wfq = 0;
- qm_info->pf_rl = 0;
qm_info->vport_rl_en = 1;
+ qm_info->vport_wfq_en = 1;
+ qm_info->pf_rl = pf_rl;
+ qm_info->pf_wfq = pf_wfq;
return 0;
alloc_err:
DP_NOTICE(p_hwfn, "Failed to allocate memory for QM params\n");
- kfree(qm_info->qm_pq_params);
- kfree(qm_info->qm_vport_params);
- kfree(qm_info->qm_port_params);
-
+ qed_qm_info_free(p_hwfn);
return -ENOMEM;
}
+/* This function reconfigures the QM pf on the fly.
+ * For this purpose we:
+ * 1. reconfigure the QM database
+ * 2. set new values to the runtime array
+ * 3. send an sdm_qm_cmd through the rbc interface to stop the QM
+ * 4. activate the init tool in the QM_PF stage
+ * 5. send an sdm_qm_cmd through the rbc interface to release the QM
+ */
+int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+ bool b_rc;
+ int rc;
+
+ /* qm_info is allocated in qed_init_qm_info() which is already called
+ * from qed_resc_alloc() or previous call of qed_qm_reconf().
+ * The allocated size may change each init, so we free it before next
+ * allocation.
+ */
+ qed_qm_info_free(p_hwfn);
+
+ /* initialize qed's qm data structure */
+ rc = qed_init_qm_info(p_hwfn, false);
+ if (rc)
+ return rc;
+
+ /* stop PF's qm queues */
+ spin_lock_bh(&qm_lock);
+ b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
+ qm_info->start_pq, qm_info->num_pqs);
+ spin_unlock_bh(&qm_lock);
+ if (!b_rc)
+ return -EINVAL;
+
+ /* clear the QM_PF runtime phase leftovers from previous init */
+ qed_init_clear_rt_data(p_hwfn);
+
+ /* prepare QM portion of runtime array */
+ qed_qm_init_pf(p_hwfn);
+
+ /* activate init tool on runtime array */
+ rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
+ p_hwfn->hw_info.hw_mode);
+ if (rc)
+ return rc;
+
+ /* start PF's qm queues */
+ spin_lock_bh(&qm_lock);
+ b_rc = qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true,
+ qm_info->start_pq, qm_info->num_pqs);
+ spin_unlock_bh(&qm_lock);
+ if (!b_rc)
+ return -EINVAL;
+
+ return 0;
+}
+
int qed_resc_alloc(struct qed_dev *cdev)
{
struct qed_consq *p_consq;
struct qed_eq *p_eq;
int i, rc = 0;
+ if (IS_VF(cdev))
+ return rc;
+
cdev->fw_data = kzalloc(sizeof(*cdev->fw_data), GFP_KERNEL);
if (!cdev->fw_data)
return -ENOMEM;
@@ -250,21 +430,20 @@ int qed_resc_alloc(struct qed_dev *cdev)
if (!p_hwfn->p_tx_cids) {
DP_NOTICE(p_hwfn,
"Failed to allocate memory for Tx Cids\n");
- rc = -ENOMEM;
- goto alloc_err;
+ goto alloc_no_mem;
}
p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
if (!p_hwfn->p_rx_cids) {
DP_NOTICE(p_hwfn,
"Failed to allocate memory for Rx Cids\n");
- rc = -ENOMEM;
- goto alloc_err;
+ goto alloc_no_mem;
}
}
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ u32 n_eqes, num_cons;
/* First allocate the context manager structure */
rc = qed_cxt_mngr_alloc(p_hwfn);
@@ -279,7 +458,7 @@ int qed_resc_alloc(struct qed_dev *cdev)
goto alloc_err;
/* Prepare and process QM requirements */
- rc = qed_init_qm_info(p_hwfn);
+ rc = qed_init_qm_info(p_hwfn, true);
if (rc)
goto alloc_err;
@@ -308,19 +487,40 @@ int qed_resc_alloc(struct qed_dev *cdev)
if (rc)
goto alloc_err;
+ rc = qed_iov_alloc(p_hwfn);
+ if (rc)
+ goto alloc_err;
+
/* EQ */
- p_eq = qed_eq_alloc(p_hwfn, 256);
- if (!p_eq) {
- rc = -ENOMEM;
+ n_eqes = qed_chain_get_capacity(&p_hwfn->p_spq->chain);
+ if (p_hwfn->hw_info.personality == QED_PCI_ETH_ROCE) {
+ num_cons = qed_cxt_get_proto_cid_count(p_hwfn,
+ PROTOCOLID_ROCE,
+ 0) * 2;
+ n_eqes += num_cons + 2 * MAX_NUM_VFS_BB;
+ } else if (p_hwfn->hw_info.personality == QED_PCI_ISCSI) {
+ num_cons =
+ qed_cxt_get_proto_cid_count(p_hwfn,
+ PROTOCOLID_ISCSI, 0);
+ n_eqes += 2 * num_cons;
+ }
+
+ if (n_eqes > 0xFFFF) {
+ DP_ERR(p_hwfn,
+ "Cannot allocate 0x%x EQ elements. The maximum of a u16 chain is 0x%x\n",
+ n_eqes, 0xFFFF);
+ rc = -EINVAL;
goto alloc_err;
}
+
+ p_eq = qed_eq_alloc(p_hwfn, (u16) n_eqes);
+ if (!p_eq)
+ goto alloc_no_mem;
p_hwfn->p_eq = p_eq;
p_consq = qed_consq_alloc(p_hwfn);
- if (!p_consq) {
- rc = -ENOMEM;
- goto alloc_err;
- }
+ if (!p_consq)
+ goto alloc_no_mem;
p_hwfn->p_consq = p_consq;
/* DMA info initialization */
@@ -330,6 +530,14 @@ int qed_resc_alloc(struct qed_dev *cdev)
"Failed to allocate memory for dmae_info structure\n");
goto alloc_err;
}
+
+ /* DCBX initialization */
+ rc = qed_dcbx_info_alloc(p_hwfn);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate memory for dcbx structure\n");
+ goto alloc_err;
+ }
}
cdev->reset_stats = kzalloc(sizeof(*cdev->reset_stats), GFP_KERNEL);
@@ -341,6 +549,8 @@ int qed_resc_alloc(struct qed_dev *cdev)
return 0;
+alloc_no_mem:
+ rc = -ENOMEM;
alloc_err:
qed_resc_free(cdev);
return rc;
@@ -350,6 +560,9 @@ void qed_resc_setup(struct qed_dev *cdev)
{
int i;
+ if (IS_VF(cdev))
+ return;
+
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
@@ -365,14 +578,15 @@ void qed_resc_setup(struct qed_dev *cdev)
p_hwfn->mcp_info->mfw_mb_length);
qed_int_setup(p_hwfn, p_hwfn->p_main_ptt);
+
+ qed_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
}
}
#define FINAL_CLEANUP_POLL_CNT (100)
#define FINAL_CLEANUP_POLL_TIME (10)
int qed_final_cleanup(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u16 id)
+ struct qed_ptt *p_ptt, u16 id, bool is_vf)
{
u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
int rc = -EBUSY;
@@ -380,6 +594,9 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn,
addr = GTT_BAR0_MAP_REG_USDM_RAM +
USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);
+ if (is_vf)
+ id += 0x10;
+
command |= X_FINAL_CLEANUP_AGG_INT <<
SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
@@ -453,7 +670,14 @@ static void qed_calc_hw_mode(struct qed_hwfn *p_hwfn)
hw_mode |= 1 << MODE_ASIC;
+ if (p_hwfn->cdev->num_hwfns > 1)
+ hw_mode |= 1 << MODE_100G;
+
p_hwfn->hw_info.hw_mode = hw_mode;
+
+ DP_VERBOSE(p_hwfn, (NETIF_MSG_PROBE | NETIF_MSG_IFUP),
+ "Configuring function for hw_mode: 0x%08x\n",
+ p_hwfn->hw_info.hw_mode);
}
/* Init run time data for all PFs on an engine. */
@@ -492,7 +716,10 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
struct qed_qm_common_rt_init_params params;
struct qed_dev *cdev = p_hwfn->cdev;
+ u16 num_pfs, pf_id;
+ u32 concrete_fid;
int rc = 0;
+ u8 vf_id;
qed_init_cau_rt_data(cdev);
@@ -538,9 +765,24 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
- /* Disable relaxed ordering in the PCI config space */
- qed_wr(p_hwfn, p_ptt, 0x20b4,
- qed_rd(p_hwfn, p_ptt, 0x20b4) & ~0x10);
+ if (QED_IS_BB(p_hwfn->cdev)) {
+ num_pfs = NUM_OF_ENG_PFS(p_hwfn->cdev);
+ for (pf_id = 0; pf_id < num_pfs; pf_id++) {
+ qed_fid_pretend(p_hwfn, p_ptt, pf_id);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_ROCE, 0x0);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
+ }
+ /* pretend to original PF */
+ qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+ }
+
+ for (vf_id = 0; vf_id < MAX_NUM_VFS_BB; vf_id++) {
+ concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
+ qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
+ }
+ /* pretend to original PF */
+ qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
return rc;
}
@@ -551,13 +793,37 @@ static int qed_hw_init_port(struct qed_hwfn *p_hwfn,
{
int rc = 0;
- rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
- hw_mode);
+ rc = qed_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, hw_mode);
+ if (rc != 0)
+ return rc;
+
+ if (hw_mode & (1 << MODE_MF_SI)) {
+ u8 pf_id = 0;
+
+ if (!qed_hw_init_first_eth(p_hwfn, p_ptt, &pf_id)) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
+ "PF[%08x] is first eth on engine\n", pf_id);
+
+ /* We should have configured BIT for ppfid, i.e., the
+ * relative function number in the port. But there's a
+ * bug in LLH in BB where the ppfid is actually engine
+ * based, so we need to take this into account.
+ */
+ qed_wr(p_hwfn, p_ptt,
+ NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR, 1 << pf_id);
+ }
+
+ /* Take the protocol-based hit vector if there is a hit,
+ * otherwise take the other vector.
+ */
+ qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_CLS_TYPE_DUALMODE, 0x2);
+ }
return rc;
}
static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
+ struct qed_tunn_start_params *p_tunn,
int hw_mode,
bool b_hw_start,
enum qed_int_mode int_mode,
@@ -574,7 +840,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;
/* Update rate limit once we'll actually have a link */
- p_hwfn->qm_info.pf_rl = 100;
+ p_hwfn->qm_info.pf_rl = 100000;
}
qed_cxt_hw_init_pf(p_hwfn);
@@ -598,12 +864,13 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
}
/* Protocol Configuration */
- STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0);
+ STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
+ (p_hwfn->hw_info.personality == QED_PCI_ISCSI) ? 1 : 0);
STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET, 0);
STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_ROCE_RT_OFFSET, 0);
/* Cleanup chip from previous driver if such remains exist */
- rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id);
+ rc = qed_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
if (rc != 0)
return rc;
@@ -620,12 +887,28 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
/* Pure runtime initializations - directly to the HW */
qed_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
+ if (hw_mode & (1 << MODE_MF_SI)) {
+ u8 pf_id = 0;
+ u32 val = 0;
+
+ if (!qed_hw_init_first_eth(p_hwfn, p_ptt, &pf_id)) {
+ if (p_hwfn->rel_pf_id == pf_id) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
+ "PF[%d] is first ETH on engine\n",
+ pf_id);
+ val = 1;
+ }
+ qed_wr(p_hwfn, p_ptt, PRS_REG_MSG_INFO, val);
+ }
+ }
+
if (b_hw_start) {
/* enable interrupts */
qed_int_igu_enable(p_hwfn, p_ptt, int_mode);
/* send function start command */
- rc = qed_sp_pf_start(p_hwfn, p_hwfn->cdev->mf_mode);
+ rc = qed_sp_pf_start(p_hwfn, p_tunn, p_hwfn->cdev->mf_mode,
+ allow_npar_tx_switch);
if (rc)
DP_NOTICE(p_hwfn, "Function start ramrod failed\n");
}
@@ -672,6 +955,7 @@ static void qed_reset_mb_shadow(struct qed_hwfn *p_hwfn,
}
int qed_hw_init(struct qed_dev *cdev,
+ struct qed_tunn_start_params *p_tunn,
bool b_hw_start,
enum qed_int_mode int_mode,
bool allow_npar_tx_switch,
@@ -680,13 +964,25 @@ int qed_hw_init(struct qed_dev *cdev,
u32 load_code, param;
int rc, mfw_rc, i;
- rc = qed_init_fw_data(cdev, bin_fw_data);
- if (rc != 0)
- return rc;
+ if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
+ DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
+ return -EINVAL;
+ }
+
+ if (IS_PF(cdev)) {
+ rc = qed_init_fw_data(cdev, bin_fw_data);
+ if (rc != 0)
+ return rc;
+ }
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ if (IS_VF(cdev)) {
+ p_hwfn->b_int_enabled = 1;
+ continue;
+ }
+
/* Enable DMAE in PXP */
rc = qed_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
@@ -708,6 +1004,11 @@ int qed_hw_init(struct qed_dev *cdev,
p_hwfn->first_on_engine = (load_code ==
FW_MSG_CODE_DRV_LOAD_ENGINE);
+ if (!qm_lock_init) {
+ spin_lock_init(&qm_lock);
+ qm_lock_init = true;
+ }
+
switch (load_code) {
case FW_MSG_CODE_DRV_LOAD_ENGINE:
rc = qed_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
@@ -724,7 +1025,7 @@ int qed_hw_init(struct qed_dev *cdev,
/* Fall through */
case FW_MSG_CODE_DRV_LOAD_FUNCTION:
rc = qed_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
- p_hwfn->hw_info.hw_mode,
+ p_tunn, p_hwfn->hw_info.hw_mode,
b_hw_start, int_mode,
allow_npar_tx_switch);
break;
@@ -749,6 +1050,20 @@ int qed_hw_init(struct qed_dev *cdev,
return mfw_rc;
}
+ /* send DCBX attention request command */
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DCB,
+ "sending phony dcbx set command to trigger DCBx attention handling\n");
+ mfw_rc = qed_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
+ DRV_MSG_CODE_SET_DCBX,
+ 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT,
+ &load_code, &param);
+ if (mfw_rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed to send DCBX attention request\n");
+ return mfw_rc;
+ }
+
p_hwfn->hw_init_done = true;
}
@@ -811,6 +1126,11 @@ int qed_hw_stop(struct qed_dev *cdev)
DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Stopping hw/fw\n");
+ if (IS_VF(cdev)) {
+ qed_vf_pf_int_cleanup(p_hwfn);
+ continue;
+ }
+
/* mark the hw as uninitialized... */
p_hwfn->hw_init_done = false;
@@ -842,15 +1162,16 @@ int qed_hw_stop(struct qed_dev *cdev)
usleep_range(1000, 2000);
}
- /* Disable DMAE in PXP - in CMT, this should only be done for
- * first hw-function, and only after all transactions have
- * stopped for all active hw-functions.
- */
- t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
- cdev->hwfns[0].p_main_ptt,
- false);
- if (t_rc != 0)
- rc = t_rc;
+ if (IS_PF(cdev)) {
+ /* Disable DMAE in PXP - in CMT, this should only be done for
+ * first hw-function, and only after all transactions have
+ * stopped for all active hw-functions.
+ */
+ t_rc = qed_change_pci_hwfn(&cdev->hwfns[0],
+ cdev->hwfns[0].p_main_ptt, false);
+ if (t_rc != 0)
+ rc = t_rc;
+ }
return rc;
}
@@ -861,7 +1182,12 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev)
for_each_hwfn(cdev, j) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[j];
- struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+ struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
+
+ if (IS_VF(cdev)) {
+ qed_vf_pf_int_cleanup(p_hwfn);
+ continue;
+ }
DP_VERBOSE(p_hwfn,
NETIF_MSG_IFDOWN,
@@ -885,6 +1211,9 @@ void qed_hw_stop_fastpath(struct qed_dev *cdev)
void qed_hw_start_fastpath(struct qed_hwfn *p_hwfn)
{
+ if (IS_VF(p_hwfn->cdev))
+ return;
+
/* Re-open incoming traffic */
qed_wr(p_hwfn, p_hwfn->p_main_ptt,
NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
@@ -914,6 +1243,13 @@ int qed_hw_reset(struct qed_dev *cdev)
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ if (IS_VF(cdev)) {
+ rc = qed_vf_pf_reset(p_hwfn);
+ if (rc)
+ return rc;
+ continue;
+ }
+
DP_VERBOSE(p_hwfn, NETIF_MSG_IFDOWN, "Resetting hw/fw\n");
/* Check for incorrect states */
@@ -1006,16 +1342,23 @@ static void qed_hw_set_feat(struct qed_hwfn *p_hwfn)
num_features);
}
-static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
+static int qed_hw_get_resc(struct qed_hwfn *p_hwfn)
{
+ u8 enabled_func_idx = p_hwfn->enabled_func_idx;
u32 *resc_start = p_hwfn->hw_info.resc_start;
+ u8 num_funcs = p_hwfn->num_funcs_on_engine;
u32 *resc_num = p_hwfn->hw_info.resc_num;
struct qed_sb_cnt_info sb_cnt_info;
- int num_funcs, i;
-
- num_funcs = MAX_NUM_PFS_BB;
+ int i, max_vf_vlan_filters;
memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
+
+#ifdef CONFIG_QED_SRIOV
+ max_vf_vlan_filters = QED_ETH_MAX_VF_NUM_VLAN_FILTERS;
+#else
+ max_vf_vlan_filters = 0;
+#endif
+
qed_int_get_num_sbs(p_hwfn, &sb_cnt_info);
resc_num[QED_SB] = min_t(u32,
@@ -1025,14 +1368,22 @@ static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
resc_num[QED_VPORT] = MAX_NUM_VPORTS_BB / num_funcs;
resc_num[QED_RSS_ENG] = ETH_RSS_ENGINE_NUM_BB / num_funcs;
resc_num[QED_PQ] = MAX_QM_TX_QUEUES_BB / num_funcs;
- resc_num[QED_RL] = 8;
+ resc_num[QED_RL] = min_t(u32, 64, resc_num[QED_VPORT]);
resc_num[QED_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
resc_num[QED_VLAN] = (ETH_NUM_VLAN_FILTERS - 1 /*For vlan0*/) /
num_funcs;
- resc_num[QED_ILT] = 950;
+ resc_num[QED_ILT] = PXP_NUM_ILT_RECORDS_BB / num_funcs;
for (i = 0; i < QED_MAX_RESC; i++)
- resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id;
+ resc_start[i] = resc_num[i] * enabled_func_idx;
+
+ /* Sanity for ILT */
+ if (RESC_END(p_hwfn, QED_ILT) > PXP_NUM_ILT_RECORDS_BB) {
+ DP_NOTICE(p_hwfn, "Can't assign ILT pages [%08x,...,%08x]\n",
+ RESC_START(p_hwfn, QED_ILT),
+ RESC_END(p_hwfn, QED_ILT) - 1);
+ return -EINVAL;
+ }
qed_hw_set_feat(p_hwfn);
@@ -1062,6 +1413,8 @@ static void qed_hw_get_resc(struct qed_hwfn *p_hwfn)
p_hwfn->hw_info.resc_start[QED_VLAN],
p_hwfn->hw_info.resc_num[QED_ILT],
p_hwfn->hw_info.resc_start[QED_ILT]);
+
+ return 0;
}
static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
@@ -1091,31 +1444,31 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
- case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X40G;
break;
- case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X50G;
break;
- case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X100G;
break;
- case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_F;
break;
- case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X10G_E;
break;
- case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_4X20G;
break;
- case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X40G;
break;
- case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_2X25G;
break;
- case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G:
+ case NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G:
p_hwfn->hw_info.port_mode = QED_PORT_MODE_DE_1X25G;
break;
default:
@@ -1160,7 +1513,7 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
link->speed.forced_speed = 50000;
break;
- case NVM_CFG1_PORT_DRV_LINK_SPEED_100G:
+ case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G:
link->speed.forced_speed = 100000;
break;
default:
@@ -1216,10 +1569,71 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn,
if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
__set_bit(QED_DEV_CAP_ETH,
&p_hwfn->hw_info.device_capabilities);
+ if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI)
+ __set_bit(QED_DEV_CAP_ISCSI,
+ &p_hwfn->hw_info.device_capabilities);
+ if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE)
+ __set_bit(QED_DEV_CAP_ROCE,
+ &p_hwfn->hw_info.device_capabilities);
return qed_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
}
+static void qed_get_num_funcs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u8 num_funcs, enabled_func_idx = p_hwfn->rel_pf_id;
+ u32 reg_function_hide, tmp, eng_mask, low_pfs_mask;
+
+ num_funcs = MAX_NUM_PFS_BB;
+
+ /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
+ * in the other bits are selected.
+ * Bits 1-15 are for functions 1-15, respectively, and their value is
+ * '0' only for enabled functions (function 0 always exists and is
+ * enabled).
+ * In case of CMT, only the "even" functions are enabled, and thus the
+ * number of functions for both hwfns is learnt from the same bits.
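+ * (e.g., reg_function_hide = 0x1 with no other bits set means all
+ *  functions are enabled; eng_mask = 0x5554 then counts the seven other
+ *  even-indexed PFs on top of this one, giving num_funcs = 8 -
+ *  illustrative decoding)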
+ */
+ reg_function_hide = qed_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);
+
+ if (reg_function_hide & 0x1) {
+ if (QED_PATH_ID(p_hwfn) && p_hwfn->cdev->num_hwfns == 1) {
+ num_funcs = 0;
+ eng_mask = 0xaaaa;
+ } else {
+ num_funcs = 1;
+ eng_mask = 0x5554;
+ }
+
+ /* Get the number of the enabled functions on the engine */
+ tmp = (reg_function_hide ^ 0xffffffff) & eng_mask;
+ while (tmp) {
+ if (tmp & 0x1)
+ num_funcs++;
+ tmp >>= 0x1;
+ }
+
+ /* Get the PF index within the enabled functions */
+ low_pfs_mask = (0x1 << p_hwfn->abs_pf_id) - 1;
+ tmp = reg_function_hide & eng_mask & low_pfs_mask;
+ while (tmp) {
+ if (tmp & 0x1)
+ enabled_func_idx--;
+ tmp >>= 0x1;
+ }
+ }
+
+ p_hwfn->num_funcs_on_engine = num_funcs;
+ p_hwfn->enabled_func_idx = enabled_func_idx;
+
+ DP_VERBOSE(p_hwfn,
+ NETIF_MSG_PROBE,
+ "PF [rel_id %d, abs_id %d] within the %d enabled functions on the engine\n",
+ p_hwfn->rel_pf_id,
+ p_hwfn->abs_pf_id,
+ p_hwfn->num_funcs_on_engine);
+}
+
static int
qed_get_hw_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -1228,6 +1642,13 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
u32 port_mode;
int rc;
+ /* Since all information is common, only first hwfns should do this */
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ rc = qed_iov_hw_info(p_hwfn);
+ if (rc)
+ return rc;
+ }
+
/* Read the port mode */
port_mode = qed_rd(p_hwfn, p_ptt,
CNIG_REG_NW_PORT_MODE_BB_B0);
@@ -1271,9 +1692,9 @@ qed_get_hw_info(struct qed_hwfn *p_hwfn,
p_hwfn->hw_info.personality = protocol;
}
- qed_hw_get_resc(p_hwfn);
+ qed_get_num_funcs(p_hwfn, p_ptt);
- return rc;
+ return qed_hw_get_resc(p_hwfn);
}
static int qed_get_dev_info(struct qed_dev *cdev)
@@ -1336,6 +1757,9 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
p_hwfn->regview = p_regview;
p_hwfn->doorbells = p_doorbells;
+ if (IS_VF(p_hwfn->cdev))
+ return qed_vf_hw_prepare(p_hwfn);
+
/* Validate that chip access is feasible */
if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
DP_ERR(p_hwfn,
@@ -1387,6 +1811,8 @@ static int qed_hw_prepare_single(struct qed_hwfn *p_hwfn,
return rc;
err2:
+ if (IS_LEAD_HWFN(p_hwfn))
+ qed_iov_free_hw_info(p_hwfn->cdev);
qed_mcp_free(p_hwfn);
err1:
qed_hw_hwfn_free(p_hwfn);
@@ -1401,7 +1827,8 @@ int qed_hw_prepare(struct qed_dev *cdev,
int rc;
/* Store the precompiled init data ptrs */
- qed_init_iro_array(cdev);
+ if (IS_PF(cdev))
+ qed_init_iro_array(cdev);
/* Initialize the first hwfn - will learn number of hwfns */
rc = qed_hw_prepare_single(p_hwfn,
@@ -1433,9 +1860,11 @@ int qed_hw_prepare(struct qed_dev *cdev,
* initialized hwfn 0.
*/
if (rc) {
- qed_init_free(p_hwfn);
- qed_mcp_free(p_hwfn);
- qed_hw_hwfn_free(p_hwfn);
+ if (IS_PF(cdev)) {
+ qed_init_free(p_hwfn);
+ qed_mcp_free(p_hwfn);
+ qed_hw_hwfn_free(p_hwfn);
+ }
}
}
@@ -1449,98 +1878,298 @@ void qed_hw_remove(struct qed_dev *cdev)
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ if (IS_VF(cdev)) {
+ qed_vf_pf_release(p_hwfn);
+ continue;
+ }
+
qed_init_free(p_hwfn);
qed_hw_hwfn_free(p_hwfn);
qed_mcp_free(p_hwfn);
}
+
+ qed_iov_free_hw_info(cdev);
}
-int qed_chain_alloc(struct qed_dev *cdev,
- enum qed_chain_use_mode intended_use,
- enum qed_chain_mode mode,
- u16 num_elems,
- size_t elem_size,
- struct qed_chain *p_chain)
+static void qed_chain_free_next_ptr(struct qed_dev *cdev,
+ struct qed_chain *p_chain)
{
- dma_addr_t p_pbl_phys = 0;
- void *p_pbl_virt = NULL;
+ void *p_virt = p_chain->p_virt_addr, *p_virt_next = NULL;
+ dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0;
+ struct qed_chain_next *p_next;
+ u32 size, i;
+
+ if (!p_virt)
+ return;
+
+ size = p_chain->elem_size * p_chain->usable_per_page;
+
+ for (i = 0; i < p_chain->page_cnt; i++) {
+ if (!p_virt)
+ break;
+
+ p_next = (struct qed_chain_next *)((u8 *)p_virt + size);
+ p_virt_next = p_next->next_virt;
+ p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys);
+
+ dma_free_coherent(&cdev->pdev->dev,
+ QED_CHAIN_PAGE_SIZE, p_virt, p_phys);
+
+ p_virt = p_virt_next;
+ p_phys = p_phys_next;
+ }
+}
+
+static void qed_chain_free_single(struct qed_dev *cdev,
+ struct qed_chain *p_chain)
+{
+ if (!p_chain->p_virt_addr)
+ return;
+
+ dma_free_coherent(&cdev->pdev->dev,
+ QED_CHAIN_PAGE_SIZE,
+ p_chain->p_virt_addr, p_chain->p_phys_addr);
+}
+
+static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
+{
+ void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
+ u32 page_cnt = p_chain->page_cnt, i, pbl_size;
+ u8 *p_pbl_virt = p_chain->pbl.p_virt_table;
+
+ if (!pp_virt_addr_tbl)
+ return;
+
+ if (!p_chain->pbl.p_virt_table)
+ goto out;
+
+ for (i = 0; i < page_cnt; i++) {
+ if (!pp_virt_addr_tbl[i])
+ break;
+
+ dma_free_coherent(&cdev->pdev->dev,
+ QED_CHAIN_PAGE_SIZE,
+ pp_virt_addr_tbl[i],
+ *(dma_addr_t *)p_pbl_virt);
+
+ p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
+ }
+
+ pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
+ dma_free_coherent(&cdev->pdev->dev,
+ pbl_size,
+ p_chain->pbl.p_virt_table, p_chain->pbl.p_phys_table);
+out:
+ vfree(p_chain->pbl.pp_virt_addr_tbl);
+}
+
+void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain)
+{
+ switch (p_chain->mode) {
+ case QED_CHAIN_MODE_NEXT_PTR:
+ qed_chain_free_next_ptr(cdev, p_chain);
+ break;
+ case QED_CHAIN_MODE_SINGLE:
+ qed_chain_free_single(cdev, p_chain);
+ break;
+ case QED_CHAIN_MODE_PBL:
+ qed_chain_free_pbl(cdev, p_chain);
+ break;
+ }
+}
+
+static int
+qed_chain_alloc_sanity_check(struct qed_dev *cdev,
+ enum qed_chain_cnt_type cnt_type,
+ size_t elem_size, u32 page_cnt)
+{
+ u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;
+
+ /* The actual chain size can be larger than the maximal possible value
+ * after rounding up the requested elements number to pages, and after
+ * taking into account the unusable elements (next-ptr elements).
+ * The size of a "u16" chain can be (U16_MAX + 1) since the chain
+ * size/capacity fields are of a u32 type.
+ */
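+ /* e.g. (illustrative): with elem_size = 8, ELEMS_PER_PAGE() is
+ * 4096 / 8 = 512, so a u16 chain (at most 0x10000 elements) can span
+ * up to 0x10000 / 512 = 128 pages.
+ */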
+ if ((cnt_type == QED_CHAIN_CNT_TYPE_U16 &&
+ chain_size > 0x10000) ||
+ (cnt_type == QED_CHAIN_CNT_TYPE_U32 &&
+ chain_size > 0x100000000ULL)) {
+ DP_NOTICE(cdev,
+ "The actual chain size (0x%llx) is larger than the maximal possible value\n",
+ chain_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+qed_chain_alloc_next_ptr(struct qed_dev *cdev, struct qed_chain *p_chain)
+{
+ void *p_virt = NULL, *p_virt_prev = NULL;
dma_addr_t p_phys = 0;
- void *p_virt = NULL;
- u16 page_cnt = 0;
- size_t size;
+ u32 i;
- if (mode == QED_CHAIN_MODE_SINGLE)
- page_cnt = 1;
- else
- page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
+ for (i = 0; i < p_chain->page_cnt; i++) {
+ p_virt = dma_alloc_coherent(&cdev->pdev->dev,
+ QED_CHAIN_PAGE_SIZE,
+ &p_phys, GFP_KERNEL);
+ if (!p_virt) {
+ DP_NOTICE(cdev, "Failed to allocate chain memory\n");
+ return -ENOMEM;
+ }
+
+ if (i == 0) {
+ qed_chain_init_mem(p_chain, p_virt, p_phys);
+ qed_chain_reset(p_chain);
+ } else {
+ qed_chain_init_next_ptr_elem(p_chain, p_virt_prev,
+ p_virt, p_phys);
+ }
+
+ p_virt_prev = p_virt;
+ }
+ /* Last page's next element should point to the beginning of the
+ * chain.
+ */
+ qed_chain_init_next_ptr_elem(p_chain, p_virt_prev,
+ p_chain->p_virt_addr,
+ p_chain->p_phys_addr);
+
+ return 0;
+}
+
+static int
+qed_chain_alloc_single(struct qed_dev *cdev, struct qed_chain *p_chain)
+{
+ dma_addr_t p_phys = 0;
+ void *p_virt = NULL;
- size = page_cnt * QED_CHAIN_PAGE_SIZE;
p_virt = dma_alloc_coherent(&cdev->pdev->dev,
- size, &p_phys, GFP_KERNEL);
+ QED_CHAIN_PAGE_SIZE, &p_phys, GFP_KERNEL);
if (!p_virt) {
- DP_NOTICE(cdev, "Failed to allocate chain mem\n");
- goto nomem;
+ DP_NOTICE(cdev, "Failed to allocate chain memory\n");
+ return -ENOMEM;
}
- if (mode == QED_CHAIN_MODE_PBL) {
- size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
- p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
- size, &p_pbl_phys,
- GFP_KERNEL);
- if (!p_pbl_virt) {
- DP_NOTICE(cdev, "Failed to allocate chain pbl mem\n");
- goto nomem;
- }
+ qed_chain_init_mem(p_chain, p_virt, p_phys);
+ qed_chain_reset(p_chain);
- qed_chain_pbl_init(p_chain, p_virt, p_phys, page_cnt,
- (u8)elem_size, intended_use,
- p_pbl_phys, p_pbl_virt);
- } else {
- qed_chain_init(p_chain, p_virt, p_phys, page_cnt,
- (u8)elem_size, intended_use, mode);
+ return 0;
+}
+
+static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
+{
+ u32 page_cnt = p_chain->page_cnt, size, i;
+ dma_addr_t p_phys = 0, p_pbl_phys = 0;
+ void **pp_virt_addr_tbl = NULL;
+ u8 *p_pbl_virt = NULL;
+ void *p_virt = NULL;
+
+ size = page_cnt * sizeof(*pp_virt_addr_tbl);
+ pp_virt_addr_tbl = vzalloc(size);
+ if (!pp_virt_addr_tbl) {
+ DP_NOTICE(cdev,
+ "Failed to allocate memory for the chain virtual addresses table\n");
+ return -ENOMEM;
}
- return 0;
+ /* The allocation of the PBL table is done with its full size, since it
+ * is expected to be contiguous.
+ * qed_chain_init_pbl_mem() is called even in a case of an allocation
+ * failure, since pp_virt_addr_tbl was previously allocated, and it
+ * should be saved to allow its freeing during the error flow.
+ */
+ size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
+ p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
+ size, &p_pbl_phys, GFP_KERNEL);
+ qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
+ pp_virt_addr_tbl);
+ if (!p_pbl_virt) {
+ DP_NOTICE(cdev, "Failed to allocate chain pbl memory\n");
+ return -ENOMEM;
+ }
-nomem:
- dma_free_coherent(&cdev->pdev->dev,
- page_cnt * QED_CHAIN_PAGE_SIZE,
- p_virt, p_phys);
- dma_free_coherent(&cdev->pdev->dev,
- page_cnt * QED_CHAIN_PBL_ENTRY_SIZE,
- p_pbl_virt, p_pbl_phys);
+ for (i = 0; i < page_cnt; i++) {
+ p_virt = dma_alloc_coherent(&cdev->pdev->dev,
+ QED_CHAIN_PAGE_SIZE,
+ &p_phys, GFP_KERNEL);
+ if (!p_virt) {
+ DP_NOTICE(cdev, "Failed to allocate chain memory\n");
+ return -ENOMEM;
+ }
- return -ENOMEM;
+ if (i == 0) {
+ qed_chain_init_mem(p_chain, p_virt, p_phys);
+ qed_chain_reset(p_chain);
+ }
+
+ /* Fill the PBL table with the physical address of the page */
+ *(dma_addr_t *)p_pbl_virt = p_phys;
+ /* Keep the virtual address of the page */
+ p_chain->pbl.pp_virt_addr_tbl[i] = p_virt;
+
+ p_pbl_virt += QED_CHAIN_PBL_ENTRY_SIZE;
+ }
+
+ return 0;
}
-void qed_chain_free(struct qed_dev *cdev,
- struct qed_chain *p_chain)
+int qed_chain_alloc(struct qed_dev *cdev,
+ enum qed_chain_use_mode intended_use,
+ enum qed_chain_mode mode,
+ enum qed_chain_cnt_type cnt_type,
+ u32 num_elems, size_t elem_size, struct qed_chain *p_chain)
{
- size_t size;
+ u32 page_cnt;
+ int rc = 0;
- if (!p_chain->p_virt_addr)
- return;
+ if (mode == QED_CHAIN_MODE_SINGLE)
+ page_cnt = 1;
+ else
+ page_cnt = QED_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
- if (p_chain->mode == QED_CHAIN_MODE_PBL) {
- size = p_chain->page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
- dma_free_coherent(&cdev->pdev->dev, size,
- p_chain->pbl.p_virt_table,
- p_chain->pbl.p_phys_table);
+ rc = qed_chain_alloc_sanity_check(cdev, cnt_type, elem_size, page_cnt);
+ if (rc) {
+ DP_NOTICE(cdev,
+ "Cannot allocate a chain with the given arguments:\n"
+ "[use_mode %d, mode %d, cnt_type %d, num_elems %d, elem_size %zu]\n",
+ intended_use, mode, cnt_type, num_elems, elem_size);
+ return rc;
}
- size = p_chain->page_cnt * QED_CHAIN_PAGE_SIZE;
- dma_free_coherent(&cdev->pdev->dev, size,
- p_chain->p_virt_addr,
- p_chain->p_phys_addr);
+ qed_chain_init_params(p_chain, page_cnt, (u8) elem_size, intended_use,
+ mode, cnt_type);
+
+ switch (mode) {
+ case QED_CHAIN_MODE_NEXT_PTR:
+ rc = qed_chain_alloc_next_ptr(cdev, p_chain);
+ break;
+ case QED_CHAIN_MODE_SINGLE:
+ rc = qed_chain_alloc_single(cdev, p_chain);
+ break;
+ case QED_CHAIN_MODE_PBL:
+ rc = qed_chain_alloc_pbl(cdev, p_chain);
+ break;
+ }
+ if (rc)
+ goto nomem;
+
+ return 0;
+
+nomem:
+ qed_chain_free(cdev, p_chain);
+ return rc;
}
-int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
- u16 src_id, u16 *dst_id)
+int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id)
{
if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
u16 min, max;
- min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
+ min = (u16) RESC_START(p_hwfn, QED_L2_QUEUE);
max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
DP_NOTICE(p_hwfn,
"l2_queue id [%d] is not valid, available indices [%d - %d]\n",
@@ -1593,3 +2222,499 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
return 0;
}
+
+static int qed_set_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u32 hw_addr, void *p_eth_qzone,
+ size_t eth_qzone_size, u8 timeset)
+{
+ struct coalescing_timeset *p_coal_timeset;
+
+ if (p_hwfn->cdev->int_coalescing_mode != QED_COAL_MODE_ENABLE) {
+ DP_NOTICE(p_hwfn, "Coalescing configuration not enabled\n");
+ return -EINVAL;
+ }
+
+ p_coal_timeset = p_eth_qzone;
+ memset(p_coal_timeset, 0, eth_qzone_size);
+ SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_TIMESET, timeset);
+ SET_FIELD(p_coal_timeset->value, COALESCING_TIMESET_VALID, 1);
+ qed_memcpy_to(p_hwfn, p_ptt, hw_addr, p_eth_qzone, eth_qzone_size);
+
+ return 0;
+}
+
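/* Editorial sketch, not part of the patch: SET_FIELD() above follows the HSI
 * convention of paired <NAME>_MASK / <NAME>_SHIFT defines (the same scheme
 * used by the context structs later in this patch). A minimal stand-alone
 * equivalent of what such a macro expands to; the EX_* names are invented:
 */
#define EX_TIMESET_MASK		0x7F	/* field is 7 bits wide */
#define EX_TIMESET_SHIFT	0	/* ... starting at bit 0 */

static inline void ex_set_timeset(u8 *value, u8 timeset)
{
	*value &= ~(EX_TIMESET_MASK << EX_TIMESET_SHIFT);	   /* clear */
	*value |= (timeset & EX_TIMESET_MASK) << EX_TIMESET_SHIFT; /* set */
}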
+int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 coalesce, u8 qid, u16 sb_id)
+{
+ struct ustorm_eth_queue_zone eth_qzone;
+ u8 timeset, timer_res;
+ u16 fw_qid = 0;
+ u32 address;
+ int rc;
+
+ /* Coalesce = (timeset << timer-resolution), timeset is 7 bits wide */
+ if (coalesce <= 0x7F) {
+ timer_res = 0;
+ } else if (coalesce <= 0xFF) {
+ timer_res = 1;
+ } else if (coalesce <= 0x1FF) {
+ timer_res = 2;
+ } else {
+ DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
+ return -EINVAL;
+ }
+ timeset = (u8)(coalesce >> timer_res);
+
+ rc = qed_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
+ if (rc)
+ return rc;
+
+ rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, false);
+ if (rc)
+ goto out;
+
+ address = BAR0_MAP_REG_USDM_RAM + USTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);
+
+ rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
+ sizeof(struct ustorm_eth_queue_zone), timeset);
+ if (rc)
+ goto out;
+
+ p_hwfn->cdev->rx_coalesce_usecs = coalesce;
+out:
+ return rc;
+}
+
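/* Editorial sketch, not part of the patch: the timer_res/timeset split above
 * encodes coalesce = (timeset << timer_res) with a 7-bit timeset. Worked
 * example: coalesce = 200 usec gives timer_res = 1, timeset = 100, and the
 * programmed value decodes back to 100 << 1 = 200 usec (odd values in that
 * range would lose their low bit, which is the accuracy trade-off noted in
 * the header documentation further below).
 */
static int coalesce_to_timeset(u16 coalesce, u8 *timer_res, u8 *timeset)
{
	if (coalesce <= 0x7F)
		*timer_res = 0;
	else if (coalesce <= 0xFF)
		*timer_res = 1;
	else if (coalesce <= 0x1FF)
		*timer_res = 2;
	else
		return -EINVAL;	/* out of range, as in the driver code */

	*timeset = (u8)(coalesce >> *timer_res);
	return 0;
}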
+int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 coalesce, u8 qid, u16 sb_id)
+{
+ struct xstorm_eth_queue_zone eth_qzone;
+ u8 timeset, timer_res;
+ u16 fw_qid = 0;
+ u32 address;
+ int rc;
+
+ /* Coalesce = (timeset << timer-resolution), timeset is 7 bits wide */
+ if (coalesce <= 0x7F) {
+ timer_res = 0;
+ } else if (coalesce <= 0xFF) {
+ timer_res = 1;
+ } else if (coalesce <= 0x1FF) {
+ timer_res = 2;
+ } else {
+ DP_ERR(p_hwfn, "Invalid coalesce value - %d\n", coalesce);
+ return -EINVAL;
+ }
+ timeset = (u8)(coalesce >> timer_res);
+
+ rc = qed_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
+ if (rc)
+ return rc;
+
+ rc = qed_int_set_timer_res(p_hwfn, p_ptt, timer_res, sb_id, true);
+ if (rc)
+ goto out;
+
+ address = BAR0_MAP_REG_XSDM_RAM + XSTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);
+
+ rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
+ sizeof(struct xstorm_eth_queue_zone), timeset);
+ if (rc)
+ goto out;
+
+ p_hwfn->cdev->tx_coalesce_usecs = coalesce;
+out:
+ return rc;
+}
+
+/* Calculate final WFQ values for all vports and configure them.
+ * After this configuration each vport will have
+ * approx min rate = min_pf_rate * (vport_wfq / QED_WFQ_UNIT)
+ */
+static void qed_configure_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 min_pf_rate)
+{
+ struct init_qm_vport_params *vport_params;
+ int i;
+
+ vport_params = p_hwfn->qm_info.qm_vport_params;
+
+ for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
+ u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
+
+ vport_params[i].vport_wfq = (wfq_speed * QED_WFQ_UNIT) /
+ min_pf_rate;
+ qed_init_vport_wfq(p_hwfn, p_ptt,
+ vport_params[i].first_tx_pq_id,
+ vport_params[i].vport_wfq);
+ }
+}
+
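/* Editorial sketch, not part of the patch: the weight formula above in
 * stand-alone form, assuming QED_WFQ_UNIT is 100 (percent-like units). For
 * wfq_speed = 2500 Mbps and min_pf_rate = 10000 Mbps the weight becomes
 * (2500 * 100) / 10000 = 25, which works back out to the documented
 * approximate min rate of 10000 * 25 / 100 = 2500 Mbps.
 */
static u32 example_vport_wfq(u32 wfq_speed, u32 min_pf_rate)
{
	const u32 wfq_unit = 100;	/* assumed value of QED_WFQ_UNIT */

	return (wfq_speed * wfq_unit) / min_pf_rate;
}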
+static void qed_init_wfq_default_param(struct qed_hwfn *p_hwfn,
+ u32 min_pf_rate)
+{
+ int i;
+
+ for (i = 0; i < p_hwfn->qm_info.num_vports; i++)
+ p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
+}
+
+static void qed_disable_wfq_for_all_vports(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 min_pf_rate)
+{
+ struct init_qm_vport_params *vport_params;
+ int i;
+
+ vport_params = p_hwfn->qm_info.qm_vport_params;
+
+ for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
+ qed_init_wfq_default_param(p_hwfn, min_pf_rate);
+ qed_init_vport_wfq(p_hwfn, p_ptt,
+ vport_params[i].first_tx_pq_id,
+ vport_params[i].vport_wfq);
+ }
+}
+
+/* This function performs several validations for WFQ
+ * configuration and required min rate for a given vport
+ * 1. req_rate must be greater than one percent of min_pf_rate.
+ * 2. req_rate should not cause other vports [not configured for WFQ explicitly]
+ * rates to get less than one percent of min_pf_rate.
+ * 3. total_req_min_rate [all vports min rate sum] shouldn't exceed min_pf_rate.
+ */
+static int qed_init_wfq_param(struct qed_hwfn *p_hwfn,
+ u16 vport_id, u32 req_rate,
+ u32 min_pf_rate)
+{
+ u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
+ int non_requested_count = 0, req_count = 0, i, num_vports;
+
+ num_vports = p_hwfn->qm_info.num_vports;
+
+ /* Accounting for the vports which are configured for WFQ explicitly */
+ for (i = 0; i < num_vports; i++) {
+ u32 tmp_speed;
+
+ if ((i != vport_id) &&
+ p_hwfn->qm_info.wfq_data[i].configured) {
+ req_count++;
+ tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
+ total_req_min_rate += tmp_speed;
+ }
+ }
+
+ /* Include current vport data as well */
+ req_count++;
+ total_req_min_rate += req_rate;
+ non_requested_count = num_vports - req_count;
+
+ if (req_rate < min_pf_rate / QED_WFQ_UNIT) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Vport [%d] - Requested rate[%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
+ vport_id, req_rate, min_pf_rate);
+ return -EINVAL;
+ }
+
+ if (num_vports > QED_WFQ_UNIT) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Number of vports is greater than %d\n",
+ QED_WFQ_UNIT);
+ return -EINVAL;
+ }
+
+ if (total_req_min_rate > min_pf_rate) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Total requested min rate for all vports[%d Mbps] is greater than configured PF min rate[%d Mbps]\n",
+ total_req_min_rate, min_pf_rate);
+ return -EINVAL;
+ }
+
+ total_left_rate = min_pf_rate - total_req_min_rate;
+
+ left_rate_per_vp = total_left_rate / non_requested_count;
+ if (left_rate_per_vp < min_pf_rate / QED_WFQ_UNIT) {
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Non WFQ configured vports rate [%d Mbps] is less than one percent of configured PF min rate[%d Mbps]\n",
+ left_rate_per_vp, min_pf_rate);
+ return -EINVAL;
+ }
+
+ p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
+ p_hwfn->qm_info.wfq_data[vport_id].configured = true;
+
+ for (i = 0; i < num_vports; i++) {
+ if (p_hwfn->qm_info.wfq_data[i].configured)
+ continue;
+
+ p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
+ }
+
+ return 0;
+}
+
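/* Editorial sketch, not part of the patch: the three checks above in
 * stand-alone form, again assuming QED_WFQ_UNIT is 100. With min_pf_rate =
 * 10000 Mbps, 8 vports and one other vport already requesting 4000 Mbps:
 * a request of 50 Mbps fails check 1 (below 10000/100 = 100 Mbps),
 * 7000 Mbps fails check 3 (4000 + 7000 > 10000), while 3000 Mbps passes,
 * leaving the 6 unconfigured vports 3000/6 = 500 Mbps each, above the
 * one-percent floor of check 2. req_count includes the current vport.
 */
static int example_wfq_checks(u32 req_rate, u32 other_req_rates,
			      u32 min_pf_rate, u32 num_vports, u32 req_count)
{
	const u32 wfq_unit = 100;	/* assumed value of QED_WFQ_UNIT */
	u32 non_requested = num_vports - req_count;
	u32 total_req = other_req_rates + req_rate;

	if (req_rate < min_pf_rate / wfq_unit)		/* check 1 */
		return -EINVAL;
	if (total_req > min_pf_rate)			/* check 3 */
		return -EINVAL;
	if (non_requested &&
	    (min_pf_rate - total_req) / non_requested <
	    min_pf_rate / wfq_unit)			/* check 2 */
		return -EINVAL;

	return 0;
}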
+static int __qed_configure_vport_wfq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 vp_id, u32 rate)
+{
+ struct qed_mcp_link_state *p_link;
+ int rc = 0;
+
+ p_link = &p_hwfn->cdev->hwfns[0].mcp_info->link_output;
+
+ if (!p_link->min_pf_rate) {
+ p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;
+ p_hwfn->qm_info.wfq_data[vp_id].configured = true;
+ return rc;
+ }
+
+ rc = qed_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);
+
+ if (rc == 0)
+ qed_configure_wfq_for_all_vports(p_hwfn, p_ptt,
+ p_link->min_pf_rate);
+ else
+ DP_NOTICE(p_hwfn,
+ "Validation failed while configuring min rate\n");
+
+ return rc;
+}
+
+static int __qed_configure_vp_wfq_on_link_change(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 min_pf_rate)
+{
+ bool use_wfq = false;
+ int rc = 0;
+ u16 i;
+
+ /* Validate all pre configured vports for wfq */
+ for (i = 0; i < p_hwfn->qm_info.num_vports; i++) {
+ u32 rate;
+
+ if (!p_hwfn->qm_info.wfq_data[i].configured)
+ continue;
+
+ rate = p_hwfn->qm_info.wfq_data[i].min_speed;
+ use_wfq = true;
+
+ rc = qed_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "WFQ validation failed while configuring min rate\n");
+ break;
+ }
+ }
+
+ if (!rc && use_wfq)
+ qed_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
+ else
+ qed_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
+
+ return rc;
+}
+
+/* Main API for qed clients to configure vport min rate.
+ * vp_id - vport id in PF Range[0 - (total_num_vports_per_pf - 1)]
+ * rate - Speed in Mbps needs to be assigned to a given vport.
+ */
+int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate)
+{
+ int i, rc = -EINVAL;
+
+ /* Currently not supported; might change in the future */
+ if (cdev->num_hwfns > 1) {
+ DP_NOTICE(cdev,
+ "WFQ configuration is not supported for this device\n");
+ return rc;
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct qed_ptt *p_ptt;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+
+ rc = __qed_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);
+
+ if (rc) {
+ qed_ptt_release(p_hwfn, p_ptt);
+ return rc;
+ }
+
+ qed_ptt_release(p_hwfn, p_ptt);
+ }
+
+ return rc;
+}
+
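/* Editorial sketch, not part of the patch: how a qed client might drive the
 * API above; vport 0 and the 2500 Mbps figure are illustrative only.
 */
static void example_set_vport_min_rate(struct qed_dev *cdev)
{
	int rc = qed_configure_vport_wfq(cdev, 0, 2500 /* Mbps */);

	if (rc)
		DP_NOTICE(cdev, "vport min-rate configuration failed: %d\n",
			  rc);
}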
+/* API to configure WFQ from mcp link change */
+void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev, u32 min_pf_rate)
+{
+ int i;
+
+ if (cdev->num_hwfns > 1) {
+ DP_VERBOSE(cdev,
+ NETIF_MSG_LINK,
+ "WFQ configuration is not supported for this device\n");
+ return;
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ __qed_configure_vp_wfq_on_link_change(p_hwfn,
+ p_hwfn->p_dpc_ptt,
+ min_pf_rate);
+ }
+}
+
+int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_link_state *p_link,
+ u8 max_bw)
+{
+ int rc = 0;
+
+ p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;
+
+ if (!p_link->line_speed && (max_bw != 100))
+ return rc;
+
+ p_link->speed = (p_link->line_speed * max_bw) / 100;
+ p_hwfn->qm_info.pf_rl = p_link->speed;
+
+ /* Since the limiter also affects Tx-switched traffic, we don't want it
+ * to limit such traffic in case there's no actual limit.
+ * In that case, set limit to imaginary high boundary.
+ */
+ if (max_bw == 100)
+ p_hwfn->qm_info.pf_rl = 100000;
+
+ rc = qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+ p_hwfn->qm_info.pf_rl);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Configured MAX bandwidth to be %08x Mb/sec\n",
+ p_link->speed);
+
+ return rc;
+}
+
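/* Editorial sketch, not part of the patch: the rate-limiter math above in
 * stand-alone form. For line_speed = 25000 Mbps and max_bw = 40 the PF is
 * capped at 25000 * 40 / 100 = 10000 Mbps; max_bw = 100 instead programs
 * the 100000 Mbps boundary so Tx-switched traffic is not throttled.
 */
static u32 example_pf_rl(u32 line_speed, u8 max_bw)
{
	if (max_bw == 100)
		return 100000;	/* artificially high, i.e. no real limit */

	return (line_speed * max_bw) / 100;
}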
+/* Main API to configure PF max bandwidth where bw range is [1 - 100] */
+int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw)
+{
+ int i, rc = -EINVAL;
+
+ if (max_bw < 1 || max_bw > 100) {
+ DP_NOTICE(cdev, "PF max bw valid range is [1-100]\n");
+ return rc;
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
+ struct qed_mcp_link_state *p_link;
+ struct qed_ptt *p_ptt;
+
+ p_link = &p_lead->mcp_info->link_output;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+
+ rc = __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt,
+ p_link, max_bw);
+
+ qed_ptt_release(p_hwfn, p_ptt);
+
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
+
+int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_link_state *p_link,
+ u8 min_bw)
+{
+ int rc = 0;
+
+ p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
+ p_hwfn->qm_info.pf_wfq = min_bw;
+
+ if (!p_link->line_speed)
+ return rc;
+
+ p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;
+
+ rc = qed_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);
+
+ DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
+ "Configured MIN bandwidth to be %d Mb/sec\n",
+ p_link->min_pf_rate);
+
+ return rc;
+}
+
+/* Main API to configure PF min bandwidth where bw range is [1-100] */
+int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw)
+{
+ int i, rc = -EINVAL;
+
+ if (min_bw < 1 || min_bw > 100) {
+ DP_NOTICE(cdev, "PF min bw valid range is [1-100]\n");
+ return rc;
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct qed_hwfn *p_lead = QED_LEADING_HWFN(cdev);
+ struct qed_mcp_link_state *p_link;
+ struct qed_ptt *p_ptt;
+
+ p_link = &p_lead->mcp_info->link_output;
+
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt)
+ return -EBUSY;
+
+ rc = __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt,
+ p_link, min_bw);
+ if (rc) {
+ qed_ptt_release(p_hwfn, p_ptt);
+ return rc;
+ }
+
+ if (p_link->min_pf_rate) {
+ u32 min_rate = p_link->min_pf_rate;
+
+ rc = __qed_configure_vp_wfq_on_link_change(p_hwfn,
+ p_ptt,
+ min_rate);
+ }
+
+ qed_ptt_release(p_hwfn, p_ptt);
+ }
+
+ return rc;
+}
+
+void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_link_state *p_link;
+
+ p_link = &p_hwfn->mcp_info->link_output;
+
+ if (p_link->min_pf_rate)
+ qed_disable_wfq_for_all_vports(p_hwfn, p_ptt,
+ p_link->min_pf_rate);
+
+ memset(p_hwfn->qm_info.wfq_data, 0,
+ sizeof(*p_hwfn->qm_info.wfq_data) * p_hwfn->qm_info.num_vports);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index d6c7ddf4f4d4..343bb0344f62 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -62,6 +62,7 @@ void qed_resc_setup(struct qed_dev *cdev);
* @brief qed_hw_init -
*
* @param cdev
+ * @param p_tunn
* @param b_hw_start
* @param int_mode - interrupt mode [msix, inta, etc.] to use.
* @param allow_npar_tx_switch - npar tx switching to be used
@@ -72,6 +73,7 @@ void qed_resc_setup(struct qed_dev *cdev);
* @return int
*/
int qed_hw_init(struct qed_dev *cdev,
+ struct qed_tunn_start_params *p_tunn,
bool b_hw_start,
enum qed_int_mode int_mode,
bool allow_npar_tx_switch,
@@ -180,11 +182,15 @@ enum qed_dmae_address_type_t {
* used mostly to write a zeroed buffer to destination address
* using DMA
*/
-#define QED_DMAE_FLAG_RW_REPL_SRC 0x00000001
-#define QED_DMAE_FLAG_COMPLETION_DST 0x00000008
+#define QED_DMAE_FLAG_RW_REPL_SRC 0x00000001
+#define QED_DMAE_FLAG_VF_SRC 0x00000002
+#define QED_DMAE_FLAG_VF_DST 0x00000004
+#define QED_DMAE_FLAG_COMPLETION_DST 0x00000008
struct qed_dmae_params {
- u32 flags; /* consists of QED_DMAE_FLAG_* values */
+ u32 flags; /* consists of QED_DMAE_FLAG_* values */
+ u8 src_vfid;
+ u8 dst_vfid;
};
/**
@@ -206,6 +212,37 @@ qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
u32 size_in_dwords,
u32 flags);
+/**
+ * @brief qed_dmae_grc2host - Read data from the dmae data offset
+ * into the destination address using the given ptt
+ *
+ * @param p_ptt
+ * @param grc_addr (dmae_data_offset)
+ * @param dest_addr
+ * @param size_in_dwords
+ * @param flags - one of the flags defined above
+ */
+int qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u32 grc_addr, dma_addr_t dest_addr, u32 size_in_dwords,
+ u32 flags);
+
+/**
+ * @brief qed_dmae_host2host - copy data from a source address
+ * to a destination address (for SRIOV) using the given ptt
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param source_addr
+ * @param dest_addr
+ * @param size_in_dwords
+ * @param params
+ */
+int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ dma_addr_t source_addr,
+ dma_addr_t dest_addr,
+ u32 size_in_dwords, struct qed_dmae_params *p_params);
+
/**
* @brief qed_chain_alloc - Allocate and initialize a chain
*
@@ -222,9 +259,8 @@ int
qed_chain_alloc(struct qed_dev *cdev,
enum qed_chain_use_mode intended_use,
enum qed_chain_mode mode,
- u16 num_elems,
- size_t elem_size,
- struct qed_chain *p_chain);
+ enum qed_chain_cnt_type cnt_type,
+ u32 num_elems, size_t elem_size, struct qed_chain *p_chain);
/**
* @brief qed_chain_free - Free chain DMA memory
@@ -232,8 +268,7 @@ qed_chain_alloc(struct qed_dev *cdev,
* @param p_hwfn
* @param p_chain
*/
-void qed_chain_free(struct qed_dev *cdev,
- struct qed_chain *p_chain);
+void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain);
/**
* @brief qed_fw_l2_queue - Get absolute L2 queue ID
@@ -280,11 +315,44 @@ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
* @param p_hwfn
* @param p_ptt
* @param id - For PF, engine-relative. For VF, PF-relative.
+ * @param is_vf - true iff cleanup is made for a VF.
*
* @return int
*/
int qed_final_cleanup(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u16 id);
+ struct qed_ptt *p_ptt, u16 id, bool is_vf);
+/**
+ * @brief qed_set_rxq_coalesce - Configure coalesce parameters for an Rx queue
+ * Coalescing can be configured up to 511 usec, but with decreasing
+ * accuracy [the bigger the value, the less accurate], up to an error
+ * of 3 usec at the highest values.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param coalesce - Coalesce value in micro seconds.
+ * @param qid - Queue index.
+ * @param sb_id - SB Id
+ *
+ * @return int
+ */
+int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 coalesce, u8 qid, u16 sb_id);
+
+/**
+ * @brief qed_set_txq_coalesce - Configure coalesce parameters for a Tx queue
+ * While the API allows setting coalescing per-qid, all Tx queues sharing an
+ * SB should be in the same range [i.e., either 0-0x7f, 0x80-0xff or
+ * 0x100-0x1ff]; otherwise the configuration would break.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param coalesce - Coalesce value in micro seconds.
+ * @param qid - Queue index.
+ * @param sb_id - SB Id
+ *
+ * @return int
+ */
+int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 coalesce, u8 qid, u16 sb_id);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index a368f5e71d95..6f9d3b831a2a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -17,24 +17,27 @@
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/qed/common_hsi.h>
+#include <linux/qed/storage_common.h>
+#include <linux/qed/tcp_common.h>
#include <linux/qed/eth_common.h>
+#include <linux/qed/iscsi_common.h>
+#include <linux/qed/rdma_common.h>
+#include <linux/qed/roce_common.h>
struct qed_hwfn;
struct qed_ptt;
-/********************************/
-/* Add include to common target */
-/********************************/
/* opcodes for the event ring */
enum common_event_opcode {
COMMON_EVENT_PF_START,
COMMON_EVENT_PF_STOP,
- COMMON_EVENT_RESERVED,
- COMMON_EVENT_RESERVED2,
- COMMON_EVENT_RESERVED3,
- COMMON_EVENT_RESERVED4,
- COMMON_EVENT_RESERVED5,
- COMMON_EVENT_RESERVED6,
+ COMMON_EVENT_VF_START,
+ COMMON_EVENT_VF_STOP,
+ COMMON_EVENT_VF_PF_CHANNEL,
+ COMMON_EVENT_VF_FLR,
+ COMMON_EVENT_PF_UPDATE,
+ COMMON_EVENT_MALICIOUS_VF,
+ COMMON_EVENT_RL_UPDATE,
COMMON_EVENT_EMPTY,
MAX_COMMON_EVENT_OPCODE
};
@@ -42,11 +45,12 @@ enum common_event_opcode {
/* Common Ramrod Command IDs */
enum common_ramrod_cmd_id {
COMMON_RAMROD_UNUSED,
- COMMON_RAMROD_PF_START /* PF Function Start Ramrod */,
- COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */,
- COMMON_RAMROD_RESERVED,
- COMMON_RAMROD_RESERVED2,
- COMMON_RAMROD_RESERVED3,
+ COMMON_RAMROD_PF_START,
+ COMMON_RAMROD_PF_STOP,
+ COMMON_RAMROD_VF_START,
+ COMMON_RAMROD_VF_STOP,
+ COMMON_RAMROD_PF_UPDATE,
+ COMMON_RAMROD_RL_UPDATE,
COMMON_RAMROD_EMPTY,
MAX_COMMON_RAMROD_CMD_ID
};
@@ -63,448 +67,448 @@ struct pstorm_core_conn_st_ctx {
/* Core Slowpath Connection storm context of Xstorm */
struct xstorm_core_conn_st_ctx {
- __le32 spq_base_lo /* SPQ Ring Base Address low dword */;
- __le32 spq_base_hi /* SPQ Ring Base Address high dword */;
- struct regpair consolid_base_addr;
- __le16 spq_cons /* SPQ Ring Consumer */;
- __le16 consolid_cons /* Consolidation Ring Consumer */;
- __le32 reserved0[55] /* Pad to 15 cycles */;
+ __le32 spq_base_lo;
+ __le32 spq_base_hi;
+ struct regpair consolid_base_addr;
+ __le16 spq_cons;
+ __le16 consolid_cons;
+ __le32 reserved0[55];
};
struct xstorm_core_conn_ag_ctx {
- u8 reserved0 /* cdu_validation */;
- u8 core_state /* state */;
- u8 flags0;
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1 /* bit6 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1 /* bit7 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 reserved0;
+ u8 core_state;
+ u8 flags0;
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1 /* bit8 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1 /* bit9 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1 /* bit10 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1 /* bit11 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1 /* bit12 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1 /* bit13 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 /* bit14 */
-#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 /* bit15 */
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
u8 flags2;
-#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
-#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
-#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
-#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
-#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
-#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
-#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
-#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
-#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
-#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3 /* cf11 */
-#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3 /* cf12 */
-#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3 /* cf13 */
-#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3 /* cf14 */
-#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3 /* cf15 */
-#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3 /* cf16 */
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3
-#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3 /* cf18 */
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 /* cf19 */
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
u8 flags7;
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 /* cf20 */
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3 /* cf21 */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 /* cf22 */
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
-#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
-#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
-#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
-#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
-#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
-#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
-#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1 /* cf11en */
-#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1 /* cf12en */
-#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1 /* cf13en */
-#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1 /* cf14en */
-#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1 /* cf15en */
-#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1 /* cf16en */
-#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1
-#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 /* cf18en */
-#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 /* cf19en */
-#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 /* cf20en */
-#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1 /* cf21en */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 /* cf22en */
-#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1 /* cf23en */
-#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1 /* rule0en */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1 /* rule1en */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1 /* rule2en */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1 /* rule3en */
-#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 /* rule4en */
-#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 /* rule8en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1 /* rule9en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1 /* rule10en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1 /* rule11en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 /* rule12en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 /* rule13en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1 /* rule14en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1 /* rule15en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1 /* rule16en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1 /* rule17en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1 /* rule18en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1 /* rule19en */
-#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 /* rule20en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 /* rule21en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 /* rule22en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 /* rule23en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 /* rule24en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 /* rule25en */
-#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1 /* bit16 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
-#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1 /* bit17 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
-#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1 /* bit18 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
-#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1 /* bit19 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
-#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1 /* bit20 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
-#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1 /* bit21 */
-#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
-#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3 /* cf23 */
-#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6
- u8 byte2 /* byte2 */;
- __le16 physical_q0 /* physical_q0 */;
- __le16 consolid_prod /* physical_q1 */;
- __le16 reserved16 /* physical_q2 */;
- __le16 tx_bd_cons /* word3 */;
- __le16 tx_bd_or_spq_prod /* word4 */;
- __le16 word5 /* word5 */;
- __le16 conn_dpi /* conn_dpi */;
- u8 byte3 /* byte3 */;
- u8 byte4 /* byte4 */;
- u8 byte5 /* byte5 */;
- u8 byte6 /* byte6 */;
- __le32 reg0 /* reg0 */;
- __le32 reg1 /* reg1 */;
- __le32 reg2 /* reg2 */;
- __le32 reg3 /* reg3 */;
- __le32 reg4 /* reg4 */;
- __le32 reg5 /* cf_array0 */;
- __le32 reg6 /* cf_array1 */;
- __le16 word7 /* word7 */;
- __le16 word8 /* word8 */;
- __le16 word9 /* word9 */;
- __le16 word10 /* word10 */;
- __le32 reg7 /* reg7 */;
- __le32 reg8 /* reg8 */;
- __le32 reg9 /* reg9 */;
- u8 byte7 /* byte7 */;
- u8 byte8 /* byte8 */;
- u8 byte9 /* byte9 */;
- u8 byte10 /* byte10 */;
- u8 byte11 /* byte11 */;
- u8 byte12 /* byte12 */;
- u8 byte13 /* byte13 */;
- u8 byte14 /* byte14 */;
- u8 byte15 /* byte15 */;
- u8 byte16 /* byte16 */;
- __le16 word11 /* word11 */;
- __le32 reg10 /* reg10 */;
- __le32 reg11 /* reg11 */;
- __le32 reg12 /* reg12 */;
- __le32 reg13 /* reg13 */;
- __le32 reg14 /* reg14 */;
- __le32 reg15 /* reg15 */;
- __le32 reg16 /* reg16 */;
- __le32 reg17 /* reg17 */;
- __le32 reg18 /* reg18 */;
- __le32 reg19 /* reg19 */;
- __le16 word12 /* word12 */;
- __le16 word13 /* word13 */;
- __le16 word14 /* word14 */;
- __le16 word15 /* word15 */;
+#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 consolid_prod;
+ __le16 reserved16;
+ __le16 tx_bd_cons;
+ __le16 tx_bd_or_spq_prod;
+ __le16 word5;
+ __le16 conn_dpi;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le16 word7;
+ __le16 word8;
+ __le16 word9;
+ __le16 word10;
+ __le32 reg7;
+ __le32 reg8;
+ __le32 reg9;
+ u8 byte7;
+ u8 byte8;
+ u8 byte9;
+ u8 byte10;
+ u8 byte11;
+ u8 byte12;
+ u8 byte13;
+ u8 byte14;
+ u8 byte15;
+ u8 byte16;
+ __le16 word11;
+ __le32 reg10;
+ __le32 reg11;
+ __le32 reg12;
+ __le32 reg13;
+ __le32 reg14;
+ __le32 reg15;
+ __le32 reg16;
+ __le32 reg17;
+ __le32 reg18;
+ __le32 reg19;
+ __le16 word12;
+ __le16 word13;
+ __le16 word14;
+ __le16 word15;
};
struct tstorm_core_conn_ag_ctx {
- u8 byte0 /* cdu_validation */;
- u8 byte1 /* state */;
- u8 flags0;
-#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
-#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
-#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
-#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
-#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
-#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
-#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
-#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
-#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
-#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
+#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
u8 flags4;
-#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
-#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
-#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
-#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
-#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
-#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
-#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
-#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
-#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
- __le32 reg0 /* reg0 */;
- __le32 reg1 /* reg1 */;
- __le32 reg2 /* reg2 */;
- __le32 reg3 /* reg3 */;
- __le32 reg4 /* reg4 */;
- __le32 reg5 /* reg5 */;
- __le32 reg6 /* reg6 */;
- __le32 reg7 /* reg7 */;
- __le32 reg8 /* reg8 */;
- u8 byte2 /* byte2 */;
- u8 byte3 /* byte3 */;
- __le16 word0 /* word0 */;
- u8 byte4 /* byte4 */;
- u8 byte5 /* byte5 */;
- __le16 word1 /* word1 */;
- __le16 word2 /* conn_dpi */;
- __le16 word3 /* word3 */;
- __le32 reg9 /* reg9 */;
- __le32 reg10 /* reg10 */;
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le32 reg7;
+ __le32 reg8;
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ u8 byte4;
+ u8 byte5;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le32 reg9;
+ __le32 reg10;
};
struct ustorm_core_conn_ag_ctx {
- u8 reserved /* cdu_validation */;
- u8 byte1 /* state */;
- u8 flags0;
-#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
-#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
-#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 reserved;
+ u8 byte1;
+ u8 flags0;
+#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
-#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
-#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
-#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
-#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
-#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
-#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
-#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
-#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
-#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
-#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
-#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
- u8 byte2 /* byte2 */;
- u8 byte3 /* byte3 */;
- __le16 word0 /* conn_dpi */;
- __le16 word1 /* word1 */;
- __le32 rx_producers /* reg0 */;
- __le32 reg1 /* reg1 */;
- __le32 reg2 /* reg2 */;
- __le32 reg3 /* reg3 */;
- __le16 word2 /* word2 */;
- __le16 word3 /* word3 */;
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le16 word1;
+ __le32 rx_producers;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le16 word2;
+ __le16 word3;
};
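Each *_MASK/*_SHIFT pair above describes one sub-field packed into the preceding flags byte. A minimal sketch of how such a pair is typically consumed; the SET_FLD/GET_FLD helpers here are illustrative stand-ins, not the driver's own accessors:

#include <stdint.h>
#include <stdio.h>

/* Illustrative field helpers built from the MASK/SHIFT convention above. */
#define SET_FLD(var, name, val) \
	((var) = (uint8_t)(((var) & ~((name##_MASK) << (name##_SHIFT))) | \
			   (((val) & (name##_MASK)) << (name##_SHIFT))))
#define GET_FLD(var, name) (((var) >> (name##_SHIFT)) & (name##_MASK))

#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK	0x1
#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT	3

int main(void)
{
	uint8_t flags2 = 0;

	SET_FLD(flags2, USTORM_CORE_CONN_AG_CTX_CF3EN, 1);
	printf("flags2=0x%02x cf3en=%u\n", flags2,
	       GET_FLD(flags2, USTORM_CORE_CONN_AG_CTX_CF3EN));
	return 0;
}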
/* The core storm context for the Mstorm */
@@ -519,239 +523,821 @@ struct ustorm_core_conn_st_ctx {
/* core connection context */
struct core_conn_context {
- struct ystorm_core_conn_st_ctx ystorm_st_context;
- struct regpair ystorm_st_padding[2] /* padding */;
- struct pstorm_core_conn_st_ctx pstorm_st_context;
- struct regpair pstorm_st_padding[2];
- struct xstorm_core_conn_st_ctx xstorm_st_context;
- struct xstorm_core_conn_ag_ctx xstorm_ag_context;
- struct tstorm_core_conn_ag_ctx tstorm_ag_context;
- struct ustorm_core_conn_ag_ctx ustorm_ag_context;
- struct mstorm_core_conn_st_ctx mstorm_st_context;
- struct ustorm_core_conn_st_ctx ustorm_st_context;
- struct regpair ustorm_st_padding[2] /* padding */;
+ struct ystorm_core_conn_st_ctx ystorm_st_context;
+ struct regpair ystorm_st_padding[2];
+ struct pstorm_core_conn_st_ctx pstorm_st_context;
+ struct regpair pstorm_st_padding[2];
+ struct xstorm_core_conn_st_ctx xstorm_st_context;
+ struct xstorm_core_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_core_conn_ag_ctx tstorm_ag_context;
+ struct ustorm_core_conn_ag_ctx ustorm_ag_context;
+ struct mstorm_core_conn_st_ctx mstorm_st_context;
+ struct ustorm_core_conn_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2];
+};
+
+struct eth_mstorm_per_pf_stat {
+ struct regpair gre_discard_pkts;
+ struct regpair vxlan_discard_pkts;
+ struct regpair geneve_discard_pkts;
+ struct regpair lb_discard_pkts;
};
struct eth_mstorm_per_queue_stat {
- struct regpair ttl0_discard;
- struct regpair packet_too_big_discard;
- struct regpair no_buff_discard;
- struct regpair not_active_discard;
- struct regpair tpa_coalesced_pkts;
- struct regpair tpa_coalesced_events;
- struct regpair tpa_aborts_num;
- struct regpair tpa_coalesced_bytes;
-};
-
+ struct regpair ttl0_discard;
+ struct regpair packet_too_big_discard;
+ struct regpair no_buff_discard;
+ struct regpair not_active_discard;
+ struct regpair tpa_coalesced_pkts;
+ struct regpair tpa_coalesced_events;
+ struct regpair tpa_aborts_num;
+ struct regpair tpa_coalesced_bytes;
+};
+
+/* Ethernet TX Per PF */
+struct eth_pstorm_per_pf_stat {
+ struct regpair sent_lb_ucast_bytes;
+ struct regpair sent_lb_mcast_bytes;
+ struct regpair sent_lb_bcast_bytes;
+ struct regpair sent_lb_ucast_pkts;
+ struct regpair sent_lb_mcast_pkts;
+ struct regpair sent_lb_bcast_pkts;
+ struct regpair sent_gre_bytes;
+ struct regpair sent_vxlan_bytes;
+ struct regpair sent_geneve_bytes;
+ struct regpair sent_gre_pkts;
+ struct regpair sent_vxlan_pkts;
+ struct regpair sent_geneve_pkts;
+ struct regpair gre_drop_pkts;
+ struct regpair vxlan_drop_pkts;
+ struct regpair geneve_drop_pkts;
+};
+
+/* Ethernet TX Per Queue Stats */
struct eth_pstorm_per_queue_stat {
- struct regpair sent_ucast_bytes;
- struct regpair sent_mcast_bytes;
- struct regpair sent_bcast_bytes;
- struct regpair sent_ucast_pkts;
- struct regpair sent_mcast_pkts;
- struct regpair sent_bcast_pkts;
- struct regpair error_drop_pkts;
+ struct regpair sent_ucast_bytes;
+ struct regpair sent_mcast_bytes;
+ struct regpair sent_bcast_bytes;
+ struct regpair sent_ucast_pkts;
+ struct regpair sent_mcast_pkts;
+ struct regpair sent_bcast_pkts;
+ struct regpair error_drop_pkts;
+};
+
+/* ETH Rx producers data */
+struct eth_rx_rate_limit {
+ __le16 mult;
+ __le16 cnst;
+ u8 add_sub_cnst;
+ u8 reserved0;
+ __le16 reserved1;
+};
+
+struct eth_ustorm_per_pf_stat {
+ struct regpair rcv_lb_ucast_bytes;
+ struct regpair rcv_lb_mcast_bytes;
+ struct regpair rcv_lb_bcast_bytes;
+ struct regpair rcv_lb_ucast_pkts;
+ struct regpair rcv_lb_mcast_pkts;
+ struct regpair rcv_lb_bcast_pkts;
+ struct regpair rcv_gre_bytes;
+ struct regpair rcv_vxlan_bytes;
+ struct regpair rcv_geneve_bytes;
+ struct regpair rcv_gre_pkts;
+ struct regpair rcv_vxlan_pkts;
+ struct regpair rcv_geneve_pkts;
};
struct eth_ustorm_per_queue_stat {
- struct regpair rcv_ucast_bytes;
- struct regpair rcv_mcast_bytes;
- struct regpair rcv_bcast_bytes;
- struct regpair rcv_ucast_pkts;
- struct regpair rcv_mcast_pkts;
- struct regpair rcv_bcast_pkts;
+ struct regpair rcv_ucast_bytes;
+ struct regpair rcv_mcast_bytes;
+ struct regpair rcv_bcast_bytes;
+ struct regpair rcv_ucast_pkts;
+ struct regpair rcv_mcast_pkts;
+ struct regpair rcv_bcast_pkts;
};
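All of these counters are regpair values, i.e. 64-bit statistics stored as two 32-bit halves. A sketch of folding one into a host u64, assuming the usual low-dword-first, little-endian regpair layout (an assumption; the layout is defined elsewhere in the HSI):

#include <stdint.h>

/* Assumed regpair layout: low dword first, both halves little-endian. */
struct regpair_ex {
	uint32_t lo;	/* stand-in for __le32 */
	uint32_t hi;
};

/* Stand-in for le32_to_cpu(); a no-op on little-endian hosts. */
static inline uint32_t le32_to_host(uint32_t v) { return v; }

static inline uint64_t regpair_to_u64(struct regpair_ex r)
{
	return ((uint64_t)le32_to_host(r.hi) << 32) | le32_to_host(r.lo);
}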
/* Event Ring Next Page Address */
struct event_ring_next_addr {
- struct regpair addr /* Next Page Address */;
- __le32 reserved[2] /* Reserved */;
+ struct regpair addr;
+ __le32 reserved[2];
};
+/* Event Ring Element */
union event_ring_element {
- struct event_ring_entry entry /* Event Ring Entry */;
- struct event_ring_next_addr next_addr;
+ struct event_ring_entry entry;
+ struct event_ring_next_addr next_addr;
};
+/* Major and Minor hsi Versions */
+struct hsi_fp_ver_struct {
+ u8 minor_ver_arr[2];
+ u8 major_ver_arr[2];
+};
+
+/* Mstorm non-triggering VF zone */
+struct mstorm_non_trigger_vf_zone {
+ struct eth_mstorm_per_queue_stat eth_queue_stat;
+ struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF];
+};
+
+/* Mstorm VF zone */
+struct mstorm_vf_zone {
+ struct mstorm_non_trigger_vf_zone non_trigger;
+};
+
+/* personality per PF */
enum personality_type {
BAD_PERSONALITY_TYP,
- PERSONALITY_RESERVED,
+ PERSONALITY_ISCSI,
PERSONALITY_RESERVED2,
- PERSONALITY_RDMA_AND_ETH /* Roce or Iwarp */,
+ PERSONALITY_RDMA_AND_ETH,
PERSONALITY_RESERVED3,
PERSONALITY_CORE,
- PERSONALITY_ETH /* Ethernet */,
+ PERSONALITY_ETH,
PERSONALITY_RESERVED4,
MAX_PERSONALITY_TYPE
};
+/* tunnel configuration */
struct pf_start_tunnel_config {
- u8 set_vxlan_udp_port_flg;
- u8 set_geneve_udp_port_flg;
- u8 tx_enable_vxlan /* If set, enable VXLAN tunnel in TX path. */;
- u8 tx_enable_l2geneve;
- u8 tx_enable_ipgeneve;
- u8 tx_enable_l2gre /* If set, enable l2 GRE tunnel in TX path. */;
- u8 tx_enable_ipgre /* If set, enable IP GRE tunnel in TX path. */;
- u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */;
- u8 tunnel_clss_l2geneve;
- u8 tunnel_clss_ipgeneve;
- u8 tunnel_clss_l2gre;
- u8 tunnel_clss_ipgre;
- __le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */;
- __le16 geneve_udp_port /* GENEVE tunnel UDP destination port. */;
+ u8 set_vxlan_udp_port_flg;
+ u8 set_geneve_udp_port_flg;
+ u8 tx_enable_vxlan;
+ u8 tx_enable_l2geneve;
+ u8 tx_enable_ipgeneve;
+ u8 tx_enable_l2gre;
+ u8 tx_enable_ipgre;
+ u8 tunnel_clss_vxlan;
+ u8 tunnel_clss_l2geneve;
+ u8 tunnel_clss_ipgeneve;
+ u8 tunnel_clss_l2gre;
+ u8 tunnel_clss_ipgre;
+ __le16 vxlan_udp_port;
+ __le16 geneve_udp_port;
};
/* Ramrod data for PF start ramrod */
struct pf_start_ramrod_data {
- struct regpair event_ring_pbl_addr;
- struct regpair consolid_q_pbl_addr;
- struct pf_start_tunnel_config tunnel_config;
- __le16 event_ring_sb_id;
- u8 base_vf_id;
- u8 num_vfs;
- u8 event_ring_num_pages;
- u8 event_ring_sb_index;
- u8 path_id;
- u8 warning_as_error;
- u8 dont_log_ramrods;
- u8 personality;
- __le16 log_type_mask;
- u8 mf_mode /* Multi function mode */;
- u8 integ_phase /* Integration phase */;
- u8 allow_npar_tx_switching;
- u8 inner_to_outer_pri_map[8];
- u8 pri_map_valid;
- u32 outer_tag;
- u8 reserved0[4];
-};
-
+ struct regpair event_ring_pbl_addr;
+ struct regpair consolid_q_pbl_addr;
+ struct pf_start_tunnel_config tunnel_config;
+ __le16 event_ring_sb_id;
+ u8 base_vf_id;
+ u8 num_vfs;
+ u8 event_ring_num_pages;
+ u8 event_ring_sb_index;
+ u8 path_id;
+ u8 warning_as_error;
+ u8 dont_log_ramrods;
+ u8 personality;
+ __le16 log_type_mask;
+ u8 mf_mode;
+ u8 integ_phase;
+ u8 allow_npar_tx_switching;
+ u8 inner_to_outer_pri_map[8];
+ u8 pri_map_valid;
+ __le32 outer_tag;
+ struct hsi_fp_ver_struct hsi_fp_ver;
+};
+
+struct protocol_dcb_data {
+ u8 dcb_enable_flag;
+ u8 dcb_priority;
+ u8 dcb_tc;
+ u8 reserved;
+};
+
+struct pf_update_tunnel_config {
+ u8 update_rx_pf_clss;
+ u8 update_tx_pf_clss;
+ u8 set_vxlan_udp_port_flg;
+ u8 set_geneve_udp_port_flg;
+ u8 tx_enable_vxlan;
+ u8 tx_enable_l2geneve;
+ u8 tx_enable_ipgeneve;
+ u8 tx_enable_l2gre;
+ u8 tx_enable_ipgre;
+ u8 tunnel_clss_vxlan;
+ u8 tunnel_clss_l2geneve;
+ u8 tunnel_clss_ipgeneve;
+ u8 tunnel_clss_l2gre;
+ u8 tunnel_clss_ipgre;
+ __le16 vxlan_udp_port;
+ __le16 geneve_udp_port;
+ __le16 reserved[3];
+};
+
+struct pf_update_ramrod_data {
+ u8 pf_id;
+ u8 update_eth_dcb_data_flag;
+ u8 update_fcoe_dcb_data_flag;
+ u8 update_iscsi_dcb_data_flag;
+ u8 update_roce_dcb_data_flag;
+ u8 update_iwarp_dcb_data_flag;
+ u8 update_mf_vlan_flag;
+ u8 reserved;
+ struct protocol_dcb_data eth_dcb_data;
+ struct protocol_dcb_data fcoe_dcb_data;
+ struct protocol_dcb_data iscsi_dcb_data;
+ struct protocol_dcb_data roce_dcb_data;
+ struct protocol_dcb_data iwarp_dcb_data;
+ __le16 mf_vlan;
+ __le16 reserved2;
+ struct pf_update_tunnel_config tunnel_config;
+};
+
+/* Ports mode */
enum ports_mode {
- ENGX2_PORTX1 /* 2 engines x 1 port */,
- ENGX2_PORTX2 /* 2 engines x 2 ports */,
- ENGX1_PORTX1 /* 1 engine x 1 port */,
- ENGX1_PORTX2 /* 1 engine x 2 ports */,
- ENGX1_PORTX4 /* 1 engine x 4 ports */,
+ ENGX2_PORTX1,
+ ENGX2_PORTX2,
+ ENGX1_PORTX1,
+ ENGX1_PORTX2,
+ ENGX1_PORTX4,
MAX_PORTS_MODE
};
+/* used to index in hsi_fp_[major|minor]_ver_arr per protocol */
+enum protocol_version_array_key {
+ ETH_VER_KEY = 0,
+ ROCE_VER_KEY,
+ MAX_PROTOCOL_VERSION_ARRAY_KEY
+};
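These keys index the two-byte arrays of struct hsi_fp_ver_struct above, so each protocol reports its own fastpath HSI major/minor pair. A small illustration with made-up version numbers:

#include <stdint.h>
#include <stdio.h>

enum { ETH_VER_KEY = 0, ROCE_VER_KEY = 1 };

struct hsi_fp_ver_ex {		/* mirrors hsi_fp_ver_struct */
	uint8_t minor_ver_arr[2];
	uint8_t major_ver_arr[2];
};

int main(void)
{
	/* Hypothetical version values, for illustration only. */
	struct hsi_fp_ver_ex v = {
		.major_ver_arr = { [ETH_VER_KEY] = 1, [ROCE_VER_KEY] = 1 },
		.minor_ver_arr = { [ETH_VER_KEY] = 2, [ROCE_VER_KEY] = 3 },
	};

	printf("eth fp hsi %u.%u\n",
	       v.major_ver_arr[ETH_VER_KEY], v.minor_ver_arr[ETH_VER_KEY]);
	return 0;
}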
+
+/* Pstorm non-triggering VF zone */
+struct pstorm_non_trigger_vf_zone {
+ struct eth_pstorm_per_queue_stat eth_queue_stat;
+ struct regpair reserved[2];
+};
+
+/* Pstorm VF zone */
+struct pstorm_vf_zone {
+ struct pstorm_non_trigger_vf_zone non_trigger;
+ struct regpair reserved[7];
+};
+
/* Ramrod Header of SPQE */
struct ramrod_header {
- __le32 cid /* Slowpath Connection CID */;
- u8 cmd_id /* Ramrod Cmd (Per Protocol Type) */;
- u8 protocol_id /* Ramrod Protocol ID */;
- __le16 echo /* Ramrod echo */;
+ __le32 cid;
+ u8 cmd_id;
+ u8 protocol_id;
+ __le16 echo;
};
/* Slowpath Element (SPQE) */
struct slow_path_element {
- struct ramrod_header hdr /* Ramrod Header */;
- struct regpair data_ptr;
+ struct ramrod_header hdr;
+ struct regpair data_ptr;
+};
+
+/* Tstorm non-triggering VF zone */
+struct tstorm_non_trigger_vf_zone {
+ struct regpair reserved[2];
};
struct tstorm_per_port_stat {
- struct regpair trunc_error_discard;
- struct regpair mac_error_discard;
- struct regpair mftag_filter_discard;
- struct regpair eth_mac_filter_discard;
- struct regpair ll2_mac_filter_discard;
- struct regpair ll2_conn_disabled_discard;
- struct regpair iscsi_irregular_pkt;
- struct regpair fcoe_irregular_pkt;
- struct regpair roce_irregular_pkt;
- struct regpair eth_irregular_pkt;
- struct regpair toe_irregular_pkt;
- struct regpair preroce_irregular_pkt;
+ struct regpair trunc_error_discard;
+ struct regpair mac_error_discard;
+ struct regpair mftag_filter_discard;
+ struct regpair eth_mac_filter_discard;
+ struct regpair reserved[5];
+ struct regpair eth_irregular_pkt;
+ struct regpair reserved1[2];
+ struct regpair eth_gre_tunn_filter_discard;
+ struct regpair eth_vxlan_tunn_filter_discard;
+ struct regpair eth_geneve_tunn_filter_discard;
+};
+
+/* Tstorm VF zone */
+struct tstorm_vf_zone {
+ struct tstorm_non_trigger_vf_zone non_trigger;
+};
+
+/* Tunnel classification scheme */
+enum tunnel_clss {
+ TUNNEL_CLSS_MAC_VLAN = 0,
+ TUNNEL_CLSS_MAC_VNI,
+ TUNNEL_CLSS_INNER_MAC_VLAN,
+ TUNNEL_CLSS_INNER_MAC_VNI,
+ TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE,
+ MAX_TUNNEL_CLSS
+};
+
+/* Ustorm non-triggering VF zone */
+struct ustorm_non_trigger_vf_zone {
+ struct eth_ustorm_per_queue_stat eth_queue_stat;
+ struct regpair vf_pf_msg_addr;
+};
+
+/* Ustorm triggering VF zone */
+struct ustorm_trigger_vf_zone {
+ u8 vf_pf_msg_valid;
+ u8 reserved[7];
+};
+
+/* Ustorm VF zone */
+struct ustorm_vf_zone {
+ struct ustorm_non_trigger_vf_zone non_trigger;
+ struct ustorm_trigger_vf_zone trigger;
+};
+
+/* VF-PF channel data */
+struct vf_pf_channel_data {
+ __le32 ready;
+ u8 valid;
+ u8 reserved0;
+ __le16 reserved1;
+};
+
+/* Ramrod data for VF start ramrod */
+struct vf_start_ramrod_data {
+ u8 vf_id;
+ u8 enable_flr_ack;
+ __le16 opaque_fid;
+ u8 personality;
+ u8 reserved[7];
+ struct hsi_fp_ver_struct hsi_fp_ver;
};
+/* Ramrod data for VF stop ramrod */
+struct vf_stop_ramrod_data {
+ u8 vf_id;
+ u8 reserved0;
+ __le16 reserved1;
+ __le32 reserved2;
+};
+
+/* Attentions status block */
struct atten_status_block {
- __le32 atten_bits;
- __le32 atten_ack;
- __le16 reserved0;
- __le16 sb_index /* status block running index */;
- __le32 reserved1;
+ __le32 atten_bits;
+ __le32 atten_ack;
+ __le16 reserved0;
+ __le16 sb_index;
+ __le32 reserved1;
+};
+
+enum command_type_bit {
+ IGU_COMMAND_TYPE_NOP = 0,
+ IGU_COMMAND_TYPE_SET = 1,
+ MAX_COMMAND_TYPE_BIT
+};
+
+/* DMAE command */
+struct dmae_cmd {
+ __le32 opcode;
+#define DMAE_CMD_SRC_MASK 0x1
+#define DMAE_CMD_SRC_SHIFT 0
+#define DMAE_CMD_DST_MASK 0x3
+#define DMAE_CMD_DST_SHIFT 1
+#define DMAE_CMD_C_DST_MASK 0x1
+#define DMAE_CMD_C_DST_SHIFT 3
+#define DMAE_CMD_CRC_RESET_MASK 0x1
+#define DMAE_CMD_CRC_RESET_SHIFT 4
+#define DMAE_CMD_SRC_ADDR_RESET_MASK 0x1
+#define DMAE_CMD_SRC_ADDR_RESET_SHIFT 5
+#define DMAE_CMD_DST_ADDR_RESET_MASK 0x1
+#define DMAE_CMD_DST_ADDR_RESET_SHIFT 6
+#define DMAE_CMD_COMP_FUNC_MASK 0x1
+#define DMAE_CMD_COMP_FUNC_SHIFT 7
+#define DMAE_CMD_COMP_WORD_EN_MASK 0x1
+#define DMAE_CMD_COMP_WORD_EN_SHIFT 8
+#define DMAE_CMD_COMP_CRC_EN_MASK 0x1
+#define DMAE_CMD_COMP_CRC_EN_SHIFT 9
+#define DMAE_CMD_COMP_CRC_OFFSET_MASK 0x7
+#define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10
+#define DMAE_CMD_RESERVED1_MASK 0x1
+#define DMAE_CMD_RESERVED1_SHIFT 13
+#define DMAE_CMD_ENDIANITY_MODE_MASK 0x3
+#define DMAE_CMD_ENDIANITY_MODE_SHIFT 14
+#define DMAE_CMD_ERR_HANDLING_MASK 0x3
+#define DMAE_CMD_ERR_HANDLING_SHIFT 16
+#define DMAE_CMD_PORT_ID_MASK 0x3
+#define DMAE_CMD_PORT_ID_SHIFT 18
+#define DMAE_CMD_SRC_PF_ID_MASK 0xF
+#define DMAE_CMD_SRC_PF_ID_SHIFT 20
+#define DMAE_CMD_DST_PF_ID_MASK 0xF
+#define DMAE_CMD_DST_PF_ID_SHIFT 24
+#define DMAE_CMD_SRC_VF_ID_VALID_MASK 0x1
+#define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28
+#define DMAE_CMD_DST_VF_ID_VALID_MASK 0x1
+#define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29
+#define DMAE_CMD_RESERVED2_MASK 0x3
+#define DMAE_CMD_RESERVED2_SHIFT 30
+ __le32 src_addr_lo;
+ __le32 src_addr_hi;
+ __le32 dst_addr_lo;
+ __le32 dst_addr_hi;
+ __le16 length_dw;
+ __le16 opcode_b;
+#define DMAE_CMD_SRC_VF_ID_MASK 0xFF
+#define DMAE_CMD_SRC_VF_ID_SHIFT 0
+#define DMAE_CMD_DST_VF_ID_MASK 0xFF
+#define DMAE_CMD_DST_VF_ID_SHIFT 8
+ __le32 comp_addr_lo;
+ __le32 comp_addr_hi;
+ __le32 comp_val;
+ __le32 crc32;
+ __le32 crc_32_c;
+ __le16 crc16;
+ __le16 crc16_c;
+ __le16 crc10;
+ __le16 reserved;
+ __le16 xsum16;
+ __le16 xsum8;
+};
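The opcode dword of struct dmae_cmd packs the routing choices whose encodings are spelled out by the dmae_cmd_*_enum definitions below it. A hedged sketch of composing one opcode value (the GRC-to-PCIe parameter choice is illustrative, not how the driver necessarily builds its commands):

#include <stdint.h>

#define DMAE_CMD_SRC_SHIFT	0	/* 1-bit field, see masks above */
#define DMAE_CMD_DST_SHIFT	1	/* 2-bit field */
#define DMAE_CMD_PORT_ID_SHIFT	18	/* 2-bit field */

/* Compose: source = GRC, destination = PCIe, port 0. The numeric values
 * follow dmae_cmd_src_enum (grc = 1) and dmae_cmd_dst_enum (pcie = 1).
 */
static uint32_t dmae_opcode_example(void)
{
	return (1u << DMAE_CMD_SRC_SHIFT) |
	       (1u << DMAE_CMD_DST_SHIFT) |
	       (0u << DMAE_CMD_PORT_ID_SHIFT);
}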
+
+enum dmae_cmd_comp_crc_en_enum {
+ dmae_cmd_comp_crc_disabled,
+ dmae_cmd_comp_crc_enabled,
+ MAX_DMAE_CMD_COMP_CRC_EN_ENUM
+};
+
+enum dmae_cmd_comp_func_enum {
+ dmae_cmd_comp_func_to_src,
+ dmae_cmd_comp_func_to_dst,
+ MAX_DMAE_CMD_COMP_FUNC_ENUM
+};
+
+enum dmae_cmd_comp_word_en_enum {
+ dmae_cmd_comp_word_disabled,
+ dmae_cmd_comp_word_enabled,
+ MAX_DMAE_CMD_COMP_WORD_EN_ENUM
+};
+
+enum dmae_cmd_c_dst_enum {
+ dmae_cmd_c_dst_pcie,
+ dmae_cmd_c_dst_grc,
+ MAX_DMAE_CMD_C_DST_ENUM
+};
+
+enum dmae_cmd_dst_enum {
+ dmae_cmd_dst_none_0,
+ dmae_cmd_dst_pcie,
+ dmae_cmd_dst_grc,
+ dmae_cmd_dst_none_3,
+ MAX_DMAE_CMD_DST_ENUM
+};
+
+enum dmae_cmd_error_handling_enum {
+ dmae_cmd_error_handling_send_regular_comp,
+ dmae_cmd_error_handling_send_comp_with_err,
+ dmae_cmd_error_handling_dont_send_comp,
+ MAX_DMAE_CMD_ERROR_HANDLING_ENUM
+};
+
+enum dmae_cmd_src_enum {
+ dmae_cmd_src_pcie,
+ dmae_cmd_src_grc,
+ MAX_DMAE_CMD_SRC_ENUM
+};
+
+/* IGU cleanup command */
+struct igu_cleanup {
+ __le32 sb_id_and_flags;
+#define IGU_CLEANUP_RESERVED0_MASK 0x7FFFFFF
+#define IGU_CLEANUP_RESERVED0_SHIFT 0
+#define IGU_CLEANUP_CLEANUP_SET_MASK 0x1
+#define IGU_CLEANUP_CLEANUP_SET_SHIFT 27
+#define IGU_CLEANUP_CLEANUP_TYPE_MASK 0x7
+#define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28
+#define IGU_CLEANUP_COMMAND_TYPE_MASK 0x1
+#define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31
+ __le32 reserved1;
+};
+
+/* IGU firmware driver command */
+union igu_command {
+ struct igu_prod_cons_update prod_cons_update;
+ struct igu_cleanup cleanup;
};
+/* IGU firmware driver command */
+struct igu_command_reg_ctrl {
+ __le16 opaque_fid;
+ __le16 igu_command_reg_ctrl_fields;
+#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_MASK 0xFFF
+#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT 0
+#define IGU_COMMAND_REG_CTRL_RESERVED_MASK 0x7
+#define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT 12
+#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK 0x1
+#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15
+};
+
+/* IGU mapping line structure */
+struct igu_mapping_line {
+ __le32 igu_mapping_line_fields;
+#define IGU_MAPPING_LINE_VALID_MASK 0x1
+#define IGU_MAPPING_LINE_VALID_SHIFT 0
+#define IGU_MAPPING_LINE_VECTOR_NUMBER_MASK 0xFF
+#define IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT 1
+#define IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK 0xFF
+#define IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT 9
+#define IGU_MAPPING_LINE_PF_VALID_MASK 0x1
+#define IGU_MAPPING_LINE_PF_VALID_SHIFT 17
+#define IGU_MAPPING_LINE_IPS_GROUP_MASK 0x3F
+#define IGU_MAPPING_LINE_IPS_GROUP_SHIFT 18
+#define IGU_MAPPING_LINE_RESERVED_MASK 0xFF
+#define IGU_MAPPING_LINE_RESERVED_SHIFT 24
+};
+
+/* IGU MSIX line structure */
+struct igu_msix_vector {
+ struct regpair address;
+ __le32 data;
+ __le32 msix_vector_fields;
+#define IGU_MSIX_VECTOR_MASK_BIT_MASK 0x1
+#define IGU_MSIX_VECTOR_MASK_BIT_SHIFT 0
+#define IGU_MSIX_VECTOR_RESERVED0_MASK 0x7FFF
+#define IGU_MSIX_VECTOR_RESERVED0_SHIFT 1
+#define IGU_MSIX_VECTOR_STEERING_TAG_MASK 0xFF
+#define IGU_MSIX_VECTOR_STEERING_TAG_SHIFT 16
+#define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF
+#define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24
+};
+
+struct mstorm_core_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+};
+
+/* per encapsulation type enabling flags */
+struct prs_reg_encapsulation_type_en {
+ u8 flags;
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT 0
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT 1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT 2
+#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT 3
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT 4
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK 0x1
+#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT 5
+#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK 0x3
+#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT 6
+};
+
+enum pxp_tph_st_hint {
+ TPH_ST_HINT_BIDIR,
+ TPH_ST_HINT_REQUESTER,
+ TPH_ST_HINT_TARGET,
+ TPH_ST_HINT_TARGET_PRIO,
+ MAX_PXP_TPH_ST_HINT
+};
+
+/* QM hardware structure of enable bypass credit mask */
+struct qm_rf_bypass_mask {
+ u8 flags;
+#define QM_RF_BYPASS_MASK_LINEVOQ_MASK 0x1
+#define QM_RF_BYPASS_MASK_LINEVOQ_SHIFT 0
+#define QM_RF_BYPASS_MASK_RESERVED0_MASK 0x1
+#define QM_RF_BYPASS_MASK_RESERVED0_SHIFT 1
+#define QM_RF_BYPASS_MASK_PFWFQ_MASK 0x1
+#define QM_RF_BYPASS_MASK_PFWFQ_SHIFT 2
+#define QM_RF_BYPASS_MASK_VPWFQ_MASK 0x1
+#define QM_RF_BYPASS_MASK_VPWFQ_SHIFT 3
+#define QM_RF_BYPASS_MASK_PFRL_MASK 0x1
+#define QM_RF_BYPASS_MASK_PFRL_SHIFT 4
+#define QM_RF_BYPASS_MASK_VPQCNRL_MASK 0x1
+#define QM_RF_BYPASS_MASK_VPQCNRL_SHIFT 5
+#define QM_RF_BYPASS_MASK_FWPAUSE_MASK 0x1
+#define QM_RF_BYPASS_MASK_FWPAUSE_SHIFT 6
+#define QM_RF_BYPASS_MASK_RESERVED1_MASK 0x1
+#define QM_RF_BYPASS_MASK_RESERVED1_SHIFT 7
+};
+
+/* QM hardware structure of opportunistic credit mask */
+struct qm_rf_opportunistic_mask {
+ __le16 flags;
+#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT 0
+#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT 1
+#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT 2
+#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT 3
+#define QM_RF_OPPORTUNISTIC_MASK_PFRL_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT 4
+#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT 5
+#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT 6
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_SHIFT 7
+#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_MASK 0x1
+#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT 8
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_MASK 0x7F
+#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT 9
+};
+
+/* QM hardware structure of QM map memory */
+struct qm_rf_pq_map {
+ __le32 reg;
+#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1
+#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0
+#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF
+#define QM_RF_PQ_MAP_RL_ID_SHIFT 1
+#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF
+#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9
+#define QM_RF_PQ_MAP_VOQ_MASK 0x1F
+#define QM_RF_PQ_MAP_VOQ_SHIFT 18
+#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3
+#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23
+#define QM_RF_PQ_MAP_RL_VALID_MASK 0x1
+#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25
+#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F
+#define QM_RF_PQ_MAP_RESERVED_SHIFT 26
+};
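struct qm_rf_pq_map is a single 32-bit QM map-memory entry; the seven fields above tile the dword exactly. A sketch of packing one entry, assuming each argument already fits within its field's mask and that the caller converts the result to __le32 before writing:

#include <stdint.h>

#define QM_RF_PQ_MAP_PQ_VALID_SHIFT		0
#define QM_RF_PQ_MAP_RL_ID_SHIFT		1
#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT		9
#define QM_RF_PQ_MAP_VOQ_SHIFT			18
#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT	23
#define QM_RF_PQ_MAP_RL_VALID_SHIFT		25

static uint32_t qm_pq_map_pack(uint8_t rl_id, uint16_t vp_pq_id,
			       uint8_t voq, uint8_t wrr_grp, int rl_valid)
{
	return (1u << QM_RF_PQ_MAP_PQ_VALID_SHIFT) |
	       ((uint32_t)rl_id << QM_RF_PQ_MAP_RL_ID_SHIFT) |
	       ((uint32_t)vp_pq_id << QM_RF_PQ_MAP_VP_PQ_ID_SHIFT) |
	       ((uint32_t)voq << QM_RF_PQ_MAP_VOQ_SHIFT) |
	       ((uint32_t)wrr_grp << QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT) |
	       ((uint32_t)!!rl_valid << QM_RF_PQ_MAP_RL_VALID_SHIFT);
}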
+
+/* Completion params for aggregated interrupt completion */
+struct sdm_agg_int_comp_params {
+ __le16 params;
+#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK 0x3F
+#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT 0
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_MASK 0x1
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT 6
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK 0x1FF
+#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT 7
+};
+
+/* SDM operation gen command (generate aggregative interrupt) */
+struct sdm_op_gen {
+ __le32 command;
+#define SDM_OP_GEN_COMP_PARAM_MASK 0xFFFF
+#define SDM_OP_GEN_COMP_PARAM_SHIFT 0
+#define SDM_OP_GEN_COMP_TYPE_MASK 0xF
+#define SDM_OP_GEN_COMP_TYPE_SHIFT 16
+#define SDM_OP_GEN_RESERVED_MASK 0xFFF
+#define SDM_OP_GEN_RESERVED_SHIFT 20
+};
+
+struct ystorm_core_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le32 reg0;
+ __le32 reg1;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg2;
+ __le32 reg3;
+};
+
+/****************************************/
+/* Debug Tools HSI constants and macros */
+/****************************************/
+
enum block_addr {
- GRCBASE_GRC = 0x50000,
- GRCBASE_MISCS = 0x9000,
- GRCBASE_MISC = 0x8000,
- GRCBASE_DBU = 0xa000,
- GRCBASE_PGLUE_B = 0x2a8000,
- GRCBASE_CNIG = 0x218000,
- GRCBASE_CPMU = 0x30000,
- GRCBASE_NCSI = 0x40000,
- GRCBASE_OPTE = 0x53000,
- GRCBASE_BMB = 0x540000,
- GRCBASE_PCIE = 0x54000,
- GRCBASE_MCP = 0xe00000,
- GRCBASE_MCP2 = 0x52000,
- GRCBASE_PSWHST = 0x2a0000,
- GRCBASE_PSWHST2 = 0x29e000,
- GRCBASE_PSWRD = 0x29c000,
- GRCBASE_PSWRD2 = 0x29d000,
- GRCBASE_PSWWR = 0x29a000,
- GRCBASE_PSWWR2 = 0x29b000,
- GRCBASE_PSWRQ = 0x280000,
- GRCBASE_PSWRQ2 = 0x240000,
- GRCBASE_PGLCS = 0x0,
- GRCBASE_PTU = 0x560000,
- GRCBASE_DMAE = 0xc000,
- GRCBASE_TCM = 0x1180000,
- GRCBASE_MCM = 0x1200000,
- GRCBASE_UCM = 0x1280000,
- GRCBASE_XCM = 0x1000000,
- GRCBASE_YCM = 0x1080000,
- GRCBASE_PCM = 0x1100000,
- GRCBASE_QM = 0x2f0000,
- GRCBASE_TM = 0x2c0000,
- GRCBASE_DORQ = 0x100000,
- GRCBASE_BRB = 0x340000,
- GRCBASE_SRC = 0x238000,
- GRCBASE_PRS = 0x1f0000,
- GRCBASE_TSDM = 0xfb0000,
- GRCBASE_MSDM = 0xfc0000,
- GRCBASE_USDM = 0xfd0000,
- GRCBASE_XSDM = 0xf80000,
- GRCBASE_YSDM = 0xf90000,
- GRCBASE_PSDM = 0xfa0000,
- GRCBASE_TSEM = 0x1700000,
- GRCBASE_MSEM = 0x1800000,
- GRCBASE_USEM = 0x1900000,
- GRCBASE_XSEM = 0x1400000,
- GRCBASE_YSEM = 0x1500000,
- GRCBASE_PSEM = 0x1600000,
- GRCBASE_RSS = 0x238800,
- GRCBASE_TMLD = 0x4d0000,
- GRCBASE_MULD = 0x4e0000,
- GRCBASE_YULD = 0x4c8000,
- GRCBASE_XYLD = 0x4c0000,
- GRCBASE_PRM = 0x230000,
- GRCBASE_PBF_PB1 = 0xda0000,
- GRCBASE_PBF_PB2 = 0xda4000,
- GRCBASE_RPB = 0x23c000,
- GRCBASE_BTB = 0xdb0000,
- GRCBASE_PBF = 0xd80000,
- GRCBASE_RDIF = 0x300000,
- GRCBASE_TDIF = 0x310000,
- GRCBASE_CDU = 0x580000,
- GRCBASE_CCFC = 0x2e0000,
- GRCBASE_TCFC = 0x2d0000,
- GRCBASE_IGU = 0x180000,
- GRCBASE_CAU = 0x1c0000,
- GRCBASE_UMAC = 0x51000,
- GRCBASE_XMAC = 0x210000,
- GRCBASE_DBG = 0x10000,
- GRCBASE_NIG = 0x500000,
- GRCBASE_WOL = 0x600000,
- GRCBASE_BMBN = 0x610000,
- GRCBASE_IPC = 0x20000,
- GRCBASE_NWM = 0x800000,
- GRCBASE_NWS = 0x700000,
- GRCBASE_MS = 0x6a0000,
- GRCBASE_PHY_PCIE = 0x620000,
- GRCBASE_MISC_AEU = 0x8000,
- GRCBASE_BAR0_MAP = 0x1c00000,
+ GRCBASE_GRC = 0x50000,
+ GRCBASE_MISCS = 0x9000,
+ GRCBASE_MISC = 0x8000,
+ GRCBASE_DBU = 0xa000,
+ GRCBASE_PGLUE_B = 0x2a8000,
+ GRCBASE_CNIG = 0x218000,
+ GRCBASE_CPMU = 0x30000,
+ GRCBASE_NCSI = 0x40000,
+ GRCBASE_OPTE = 0x53000,
+ GRCBASE_BMB = 0x540000,
+ GRCBASE_PCIE = 0x54000,
+ GRCBASE_MCP = 0xe00000,
+ GRCBASE_MCP2 = 0x52000,
+ GRCBASE_PSWHST = 0x2a0000,
+ GRCBASE_PSWHST2 = 0x29e000,
+ GRCBASE_PSWRD = 0x29c000,
+ GRCBASE_PSWRD2 = 0x29d000,
+ GRCBASE_PSWWR = 0x29a000,
+ GRCBASE_PSWWR2 = 0x29b000,
+ GRCBASE_PSWRQ = 0x280000,
+ GRCBASE_PSWRQ2 = 0x240000,
+ GRCBASE_PGLCS = 0x0,
+ GRCBASE_DMAE = 0xc000,
+ GRCBASE_PTU = 0x560000,
+ GRCBASE_TCM = 0x1180000,
+ GRCBASE_MCM = 0x1200000,
+ GRCBASE_UCM = 0x1280000,
+ GRCBASE_XCM = 0x1000000,
+ GRCBASE_YCM = 0x1080000,
+ GRCBASE_PCM = 0x1100000,
+ GRCBASE_QM = 0x2f0000,
+ GRCBASE_TM = 0x2c0000,
+ GRCBASE_DORQ = 0x100000,
+ GRCBASE_BRB = 0x340000,
+ GRCBASE_SRC = 0x238000,
+ GRCBASE_PRS = 0x1f0000,
+ GRCBASE_TSDM = 0xfb0000,
+ GRCBASE_MSDM = 0xfc0000,
+ GRCBASE_USDM = 0xfd0000,
+ GRCBASE_XSDM = 0xf80000,
+ GRCBASE_YSDM = 0xf90000,
+ GRCBASE_PSDM = 0xfa0000,
+ GRCBASE_TSEM = 0x1700000,
+ GRCBASE_MSEM = 0x1800000,
+ GRCBASE_USEM = 0x1900000,
+ GRCBASE_XSEM = 0x1400000,
+ GRCBASE_YSEM = 0x1500000,
+ GRCBASE_PSEM = 0x1600000,
+ GRCBASE_RSS = 0x238800,
+ GRCBASE_TMLD = 0x4d0000,
+ GRCBASE_MULD = 0x4e0000,
+ GRCBASE_YULD = 0x4c8000,
+ GRCBASE_XYLD = 0x4c0000,
+ GRCBASE_PRM = 0x230000,
+ GRCBASE_PBF_PB1 = 0xda0000,
+ GRCBASE_PBF_PB2 = 0xda4000,
+ GRCBASE_RPB = 0x23c000,
+ GRCBASE_BTB = 0xdb0000,
+ GRCBASE_PBF = 0xd80000,
+ GRCBASE_RDIF = 0x300000,
+ GRCBASE_TDIF = 0x310000,
+ GRCBASE_CDU = 0x580000,
+ GRCBASE_CCFC = 0x2e0000,
+ GRCBASE_TCFC = 0x2d0000,
+ GRCBASE_IGU = 0x180000,
+ GRCBASE_CAU = 0x1c0000,
+ GRCBASE_UMAC = 0x51000,
+ GRCBASE_XMAC = 0x210000,
+ GRCBASE_DBG = 0x10000,
+ GRCBASE_NIG = 0x500000,
+ GRCBASE_WOL = 0x600000,
+ GRCBASE_BMBN = 0x610000,
+ GRCBASE_IPC = 0x20000,
+ GRCBASE_NWM = 0x800000,
+ GRCBASE_NWS = 0x700000,
+ GRCBASE_MS = 0x6a0000,
+ GRCBASE_PHY_PCIE = 0x620000,
+ GRCBASE_LED = 0x6b8000,
+ GRCBASE_MISC_AEU = 0x8000,
+ GRCBASE_BAR0_MAP = 0x1c00000,
MAX_BLOCK_ADDR
};
@@ -778,8 +1364,8 @@ enum block_id {
BLOCK_PSWRQ,
BLOCK_PSWRQ2,
BLOCK_PGLCS,
- BLOCK_PTU,
BLOCK_DMAE,
+ BLOCK_PTU,
BLOCK_TCM,
BLOCK_MCM,
BLOCK_UCM,
@@ -833,141 +1419,216 @@ enum block_id {
BLOCK_NWS,
BLOCK_MS,
BLOCK_PHY_PCIE,
+ BLOCK_LED,
BLOCK_MISC_AEU,
BLOCK_BAR0_MAP,
MAX_BLOCK_ID
};
-enum command_type_bit {
- IGU_COMMAND_TYPE_NOP = 0,
- IGU_COMMAND_TYPE_SET = 1,
- MAX_COMMAND_TYPE_BIT
+/* binary debug buffer types */
+enum bin_dbg_buffer_type {
+ BIN_BUF_DBG_MODE_TREE,
+ BIN_BUF_DBG_DUMP_REG,
+ BIN_BUF_DBG_DUMP_MEM,
+ BIN_BUF_DBG_IDLE_CHK_REGS,
+ BIN_BUF_DBG_IDLE_CHK_IMMS,
+ BIN_BUF_DBG_IDLE_CHK_RULES,
+ BIN_BUF_DBG_IDLE_CHK_PARSING_DATA,
+ BIN_BUF_DBG_ATTN_BLOCKS,
+ BIN_BUF_DBG_ATTN_REGS,
+ BIN_BUF_DBG_ATTN_INDEXES,
+ BIN_BUF_DBG_ATTN_NAME_OFFSETS,
+ BIN_BUF_DBG_PARSING_STRINGS,
+ MAX_BIN_DBG_BUFFER_TYPE
};
-struct dmae_cmd {
- __le32 opcode;
-#define DMAE_CMD_SRC_MASK 0x1
-#define DMAE_CMD_SRC_SHIFT 0
-#define DMAE_CMD_DST_MASK 0x3
-#define DMAE_CMD_DST_SHIFT 1
-#define DMAE_CMD_C_DST_MASK 0x1
-#define DMAE_CMD_C_DST_SHIFT 3
-#define DMAE_CMD_CRC_RESET_MASK 0x1
-#define DMAE_CMD_CRC_RESET_SHIFT 4
-#define DMAE_CMD_SRC_ADDR_RESET_MASK 0x1
-#define DMAE_CMD_SRC_ADDR_RESET_SHIFT 5
-#define DMAE_CMD_DST_ADDR_RESET_MASK 0x1
-#define DMAE_CMD_DST_ADDR_RESET_SHIFT 6
-#define DMAE_CMD_COMP_FUNC_MASK 0x1
-#define DMAE_CMD_COMP_FUNC_SHIFT 7
-#define DMAE_CMD_COMP_WORD_EN_MASK 0x1
-#define DMAE_CMD_COMP_WORD_EN_SHIFT 8
-#define DMAE_CMD_COMP_CRC_EN_MASK 0x1
-#define DMAE_CMD_COMP_CRC_EN_SHIFT 9
-#define DMAE_CMD_COMP_CRC_OFFSET_MASK 0x7
-#define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10
-#define DMAE_CMD_RESERVED1_MASK 0x1
-#define DMAE_CMD_RESERVED1_SHIFT 13
-#define DMAE_CMD_ENDIANITY_MODE_MASK 0x3
-#define DMAE_CMD_ENDIANITY_MODE_SHIFT 14
-#define DMAE_CMD_ERR_HANDLING_MASK 0x3
-#define DMAE_CMD_ERR_HANDLING_SHIFT 16
-#define DMAE_CMD_PORT_ID_MASK 0x3
-#define DMAE_CMD_PORT_ID_SHIFT 18
-#define DMAE_CMD_SRC_PF_ID_MASK 0xF
-#define DMAE_CMD_SRC_PF_ID_SHIFT 20
-#define DMAE_CMD_DST_PF_ID_MASK 0xF
-#define DMAE_CMD_DST_PF_ID_SHIFT 24
-#define DMAE_CMD_SRC_VF_ID_VALID_MASK 0x1
-#define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28
-#define DMAE_CMD_DST_VF_ID_VALID_MASK 0x1
-#define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29
-#define DMAE_CMD_RESERVED2_MASK 0x3
-#define DMAE_CMD_RESERVED2_SHIFT 30
- __le32 src_addr_lo;
- __le32 src_addr_hi;
- __le32 dst_addr_lo;
- __le32 dst_addr_hi;
- __le16 length /* Length in DW */;
- __le16 opcode_b;
-#define DMAE_CMD_SRC_VF_ID_MASK 0xFF /* Source VF id */
-#define DMAE_CMD_SRC_VF_ID_SHIFT 0
-#define DMAE_CMD_DST_VF_ID_MASK 0xFF /* Destination VF id */
-#define DMAE_CMD_DST_VF_ID_SHIFT 8
- __le32 comp_addr_lo /* PCIe completion address low or grc address */;
- __le32 comp_addr_hi;
-	__le32 comp_val /* Value to write to completion address */;
-	__le32 crc32 /* crc32 result */;
- __le32 crc_32_c /* crc32_c result */;
- __le16 crc16 /* crc16 result */;
- __le16 crc16_c /* crc16_c result */;
- __le16 crc10 /* crc_t10 result */;
- __le16 reserved;
- __le16 xsum16 /* checksum16 result */;
- __le16 xsum8 /* checksum8 result */;
+/* Chip IDs */
+enum chip_ids {
+ CHIP_RESERVED,
+ CHIP_BB_B0,
+ CHIP_RESERVED2,
+ MAX_CHIP_IDS
};
-struct igu_cleanup {
- __le32 sb_id_and_flags;
-#define IGU_CLEANUP_RESERVED0_MASK 0x7FFFFFF
-#define IGU_CLEANUP_RESERVED0_SHIFT 0
-#define IGU_CLEANUP_CLEANUP_SET_MASK 0x1 /* cleanup clear - 0, set - 1 */
-#define IGU_CLEANUP_CLEANUP_SET_SHIFT 27
-#define IGU_CLEANUP_CLEANUP_TYPE_MASK 0x7
-#define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28
-#define IGU_CLEANUP_COMMAND_TYPE_MASK 0x1
-#define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31
- __le32 reserved1;
+/* Attention bit mapping */
+struct dbg_attn_bit_mapping {
+ __le16 data;
+#define DBG_ATTN_BIT_MAPPING_VAL_MASK 0x7FFF
+#define DBG_ATTN_BIT_MAPPING_VAL_SHIFT 0
+#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_MASK 0x1
+#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_SHIFT 15
};
-union igu_command {
- struct igu_prod_cons_update prod_cons_update;
- struct igu_cleanup cleanup;
+/* Attention block per-type data */
+struct dbg_attn_block_type_data {
+ __le16 names_offset;
+ __le16 reserved1;
+ u8 num_regs;
+ u8 reserved2;
+ __le16 regs_offset;
};
-struct igu_command_reg_ctrl {
- __le16 opaque_fid;
- __le16 igu_command_reg_ctrl_fields;
-#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_MASK 0xFFF
-#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT 0
-#define IGU_COMMAND_REG_CTRL_RESERVED_MASK 0x7
-#define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT 12
-#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK 0x1
-#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15
+/* Block attentions */
+struct dbg_attn_block {
+ struct dbg_attn_block_type_data per_type_data[2];
};
-struct igu_mapping_line {
- __le32 igu_mapping_line_fields;
-#define IGU_MAPPING_LINE_VALID_MASK 0x1
-#define IGU_MAPPING_LINE_VALID_SHIFT 0
-#define IGU_MAPPING_LINE_VECTOR_NUMBER_MASK 0xFF
-#define IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT 1
-#define IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK 0xFF
-#define IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT 9
-#define IGU_MAPPING_LINE_PF_VALID_MASK 0x1 /* PF-1, VF-0 */
-#define IGU_MAPPING_LINE_PF_VALID_SHIFT 17
-#define IGU_MAPPING_LINE_IPS_GROUP_MASK 0x3F
-#define IGU_MAPPING_LINE_IPS_GROUP_SHIFT 18
-#define IGU_MAPPING_LINE_RESERVED_MASK 0xFF
-#define IGU_MAPPING_LINE_RESERVED_SHIFT 24
+/* Attention register result */
+struct dbg_attn_reg_result {
+ __le32 data;
+#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF
+#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0
+#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_MASK 0xFF
+#define DBG_ATTN_REG_RESULT_NUM_ATTN_IDX_SHIFT 24
+ __le16 attn_idx_offset;
+ __le16 reserved;
+ __le32 sts_val;
+ __le32 mask_val;
+};
+
+/* Attention block result */
+struct dbg_attn_block_result {
+ u8 block_id;
+ u8 data;
+#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_MASK 0x3
+#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_SHIFT 0
+#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_MASK 0x3F
+#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_SHIFT 2
+ __le16 names_offset;
+ struct dbg_attn_reg_result reg_results[15];
+};
+
+/* mode header */
+struct dbg_mode_hdr {
+ __le16 data;
+#define DBG_MODE_HDR_EVAL_MODE_MASK 0x1
+#define DBG_MODE_HDR_EVAL_MODE_SHIFT 0
+#define DBG_MODE_HDR_MODES_BUF_OFFSET_MASK 0x7FFF
+#define DBG_MODE_HDR_MODES_BUF_OFFSET_SHIFT 1
+};
+
+/* Attention register */
+struct dbg_attn_reg {
+ struct dbg_mode_hdr mode;
+ __le16 attn_idx_offset;
+ __le32 data;
+#define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF
+#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0
+#define DBG_ATTN_REG_NUM_ATTN_IDX_MASK 0xFF
+#define DBG_ATTN_REG_NUM_ATTN_IDX_SHIFT 24
+ __le32 sts_clr_address;
+ __le32 mask_address;
+};
+
+/* attention types */
+enum dbg_attn_type {
+ ATTN_TYPE_INTERRUPT,
+ ATTN_TYPE_PARITY,
+ MAX_DBG_ATTN_TYPE
+};
+
+/* Debug status codes */
+enum dbg_status {
+ DBG_STATUS_OK,
+ DBG_STATUS_APP_VERSION_NOT_SET,
+ DBG_STATUS_UNSUPPORTED_APP_VERSION,
+ DBG_STATUS_DBG_BLOCK_NOT_RESET,
+ DBG_STATUS_INVALID_ARGS,
+ DBG_STATUS_OUTPUT_ALREADY_SET,
+ DBG_STATUS_INVALID_PCI_BUF_SIZE,
+ DBG_STATUS_PCI_BUF_ALLOC_FAILED,
+ DBG_STATUS_PCI_BUF_NOT_ALLOCATED,
+ DBG_STATUS_TOO_MANY_INPUTS,
+ DBG_STATUS_INPUT_OVERLAP,
+ DBG_STATUS_HW_ONLY_RECORDING,
+ DBG_STATUS_STORM_ALREADY_ENABLED,
+ DBG_STATUS_STORM_NOT_ENABLED,
+ DBG_STATUS_BLOCK_ALREADY_ENABLED,
+ DBG_STATUS_BLOCK_NOT_ENABLED,
+ DBG_STATUS_NO_INPUT_ENABLED,
+ DBG_STATUS_NO_FILTER_TRIGGER_64B,
+ DBG_STATUS_FILTER_ALREADY_ENABLED,
+ DBG_STATUS_TRIGGER_ALREADY_ENABLED,
+ DBG_STATUS_TRIGGER_NOT_ENABLED,
+ DBG_STATUS_CANT_ADD_CONSTRAINT,
+ DBG_STATUS_TOO_MANY_TRIGGER_STATES,
+ DBG_STATUS_TOO_MANY_CONSTRAINTS,
+ DBG_STATUS_RECORDING_NOT_STARTED,
+ DBG_STATUS_DATA_DIDNT_TRIGGER,
+ DBG_STATUS_NO_DATA_RECORDED,
+ DBG_STATUS_DUMP_BUF_TOO_SMALL,
+ DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED,
+ DBG_STATUS_UNKNOWN_CHIP,
+ DBG_STATUS_VIRT_MEM_ALLOC_FAILED,
+ DBG_STATUS_BLOCK_IN_RESET,
+ DBG_STATUS_INVALID_TRACE_SIGNATURE,
+ DBG_STATUS_INVALID_NVRAM_BUNDLE,
+ DBG_STATUS_NVRAM_GET_IMAGE_FAILED,
+ DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE,
+ DBG_STATUS_NVRAM_READ_FAILED,
+ DBG_STATUS_IDLE_CHK_PARSE_FAILED,
+ DBG_STATUS_MCP_TRACE_BAD_DATA,
+ DBG_STATUS_MCP_TRACE_NO_META,
+ DBG_STATUS_MCP_COULD_NOT_HALT,
+ DBG_STATUS_MCP_COULD_NOT_RESUME,
+ DBG_STATUS_DMAE_FAILED,
+ DBG_STATUS_SEMI_FIFO_NOT_EMPTY,
+ DBG_STATUS_IGU_FIFO_BAD_DATA,
+ DBG_STATUS_MCP_COULD_NOT_MASK_PRTY,
+ DBG_STATUS_FW_ASSERTS_PARSE_FAILED,
+ DBG_STATUS_REG_FIFO_BAD_DATA,
+ DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA,
+ DBG_STATUS_DBG_ARRAY_NOT_SET,
+ MAX_DBG_STATUS
};
-struct igu_msix_vector {
- struct regpair address;
- __le32 data;
- __le32 msix_vector_fields;
-#define IGU_MSIX_VECTOR_MASK_BIT_MASK 0x1
-#define IGU_MSIX_VECTOR_MASK_BIT_SHIFT 0
-#define IGU_MSIX_VECTOR_RESERVED0_MASK 0x7FFF
-#define IGU_MSIX_VECTOR_RESERVED0_SHIFT 1
-#define IGU_MSIX_VECTOR_STEERING_TAG_MASK 0xFF
-#define IGU_MSIX_VECTOR_STEERING_TAG_SHIFT 16
-#define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF
-#define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24
+/********************************/
+/* HSI Init Functions constants */
+/********************************/
+
+/* Number of VLAN priorities */
+#define NUM_OF_VLAN_PRIORITIES 8
+
+/* QM per-port init parameters */
+struct init_qm_port_params {
+ u8 active;
+ u8 active_phys_tcs;
+ __le16 num_pbf_cmd_lines;
+ __le16 num_btb_blocks;
+ __le16 reserved;
+};
+
+/* QM per-PQ init parameters */
+struct init_qm_pq_params {
+ u8 vport_id;
+ u8 tc_id;
+ u8 wrr_group;
+ u8 rl_valid;
};
+/* QM per-vport init parameters */
+struct init_qm_vport_params {
+ __le32 vport_rl;
+ __le16 vport_wfq;
+ __le16 first_tx_pq_id[NUM_OF_TCS];
+};
+
+/**************************************/
+/* Init Tool HSI constants and macros */
+/**************************************/
+
+/* Width of GRC address in bits (addresses are specified in dwords) */
+#define GRC_ADDR_BITS 23
+#define MAX_GRC_ADDR ((1 << GRC_ADDR_BITS) - 1)
+
+/* indicates an init that should be applied to any phase ID */
+#define ANY_PHASE_ID 0xffff
+
+/* Max size in dwords of a zipped array */
+#define MAX_ZIPPED_SIZE 8192
+
enum init_modes {
- MODE_BB_A0,
+ MODE_RESERVED,
MODE_BB_B0,
MODE_RESERVED2,
MODE_ASIC,
@@ -982,7 +1643,8 @@ enum init_modes {
MODE_PORTS_PER_ENG_2,
MODE_PORTS_PER_ENG_4,
MODE_100G,
- MODE_EAGLE_ENG1_WORKAROUND,
+ MODE_40G,
+ MODE_RESERVED7,
MAX_INIT_MODES
};
@@ -990,489 +1652,307 @@ enum init_phases {
PHASE_ENGINE,
PHASE_PORT,
PHASE_PF,
- PHASE_RESERVED,
+ PHASE_VF,
PHASE_QM_PF,
MAX_INIT_PHASES
};
-/* per encapsulation type enabling flags */
-struct prs_reg_encapsulation_type_en {
- u8 flags;
-#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK 0x1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT 0
-#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK 0x1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT 1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK 0x1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT 2
-#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK 0x1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT 3
-#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK 0x1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT 4
-#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK 0x1
-#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT 5
-#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK 0x3
-#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT 6
-};
-
-enum pxp_tph_st_hint {
- TPH_ST_HINT_BIDIR /* Read/Write access by Host and Device */,
- TPH_ST_HINT_REQUESTER /* Read/Write access by Device */,
- TPH_ST_HINT_TARGET,
- TPH_ST_HINT_TARGET_PRIO,
- MAX_PXP_TPH_ST_HINT
-};
-
-/* QM hardware structure of enable bypass credit mask */
-struct qm_rf_bypass_mask {
- u8 flags;
-#define QM_RF_BYPASS_MASK_LINEVOQ_MASK 0x1
-#define QM_RF_BYPASS_MASK_LINEVOQ_SHIFT 0
-#define QM_RF_BYPASS_MASK_RESERVED0_MASK 0x1
-#define QM_RF_BYPASS_MASK_RESERVED0_SHIFT 1
-#define QM_RF_BYPASS_MASK_PFWFQ_MASK 0x1
-#define QM_RF_BYPASS_MASK_PFWFQ_SHIFT 2
-#define QM_RF_BYPASS_MASK_VPWFQ_MASK 0x1
-#define QM_RF_BYPASS_MASK_VPWFQ_SHIFT 3
-#define QM_RF_BYPASS_MASK_PFRL_MASK 0x1
-#define QM_RF_BYPASS_MASK_PFRL_SHIFT 4
-#define QM_RF_BYPASS_MASK_VPQCNRL_MASK 0x1
-#define QM_RF_BYPASS_MASK_VPQCNRL_SHIFT 5
-#define QM_RF_BYPASS_MASK_FWPAUSE_MASK 0x1
-#define QM_RF_BYPASS_MASK_FWPAUSE_SHIFT 6
-#define QM_RF_BYPASS_MASK_RESERVED1_MASK 0x1
-#define QM_RF_BYPASS_MASK_RESERVED1_SHIFT 7
+enum init_split_types {
+ SPLIT_TYPE_NONE,
+ SPLIT_TYPE_PORT,
+ SPLIT_TYPE_PF,
+ SPLIT_TYPE_PORT_PF,
+ SPLIT_TYPE_VF,
+ MAX_INIT_SPLIT_TYPES
};
-/* QM hardware structure of opportunistic credit mask */
-struct qm_rf_opportunistic_mask {
- __le16 flags;
-#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT 0
-#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT 1
-#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT 2
-#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT 3
-#define QM_RF_OPPORTUNISTIC_MASK_PFRL_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT 4
-#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT 5
-#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT 6
-#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_SHIFT 7
-#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_MASK 0x1
-#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT 8
-#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_MASK 0x7F
-#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT 9
-};
-
-/* QM hardware structure of QM map memory */
-struct qm_rf_pq_map {
- u32 reg;
-#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1 /* PQ active */
-#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0
-#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF /* RL ID */
-#define QM_RF_PQ_MAP_RL_ID_SHIFT 1
-#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF
-#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9
-#define QM_RF_PQ_MAP_VOQ_MASK 0x1F /* VOQ */
-#define QM_RF_PQ_MAP_VOQ_SHIFT 18
-#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3 /* WRR weight */
-#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23
-#define QM_RF_PQ_MAP_RL_VALID_MASK 0x1 /* RL active */
-#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25
-#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F
-#define QM_RF_PQ_MAP_RESERVED_SHIFT 26
-};
-
-/* Completion params for aggregated interrupt completion */
-struct sdm_agg_int_comp_params {
- __le16 params;
-#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK 0x3F
-#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT 0
-#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_MASK 0x1
-#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT 6
-#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK 0x1FF
-#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT 7
-};
-
-/* SDM operation gen command (generate aggregative interrupt) */
-struct sdm_op_gen {
- __le32 command;
-#define SDM_OP_GEN_COMP_PARAM_MASK 0xFFFF /* completion parameters 0-15 */
-#define SDM_OP_GEN_COMP_PARAM_SHIFT 0
-#define SDM_OP_GEN_COMP_TYPE_MASK 0xF /* completion type 16-19 */
-#define SDM_OP_GEN_COMP_TYPE_SHIFT 16
-#define SDM_OP_GEN_RESERVED_MASK 0xFFF /* reserved 20-31 */
-#define SDM_OP_GEN_RESERVED_SHIFT 20
-};
-
-/*********************************** Init ************************************/
-
-/* Width of GRC address in bits (addresses are specified in dwords) */
-#define GRC_ADDR_BITS 23
-#define MAX_GRC_ADDR ((1 << GRC_ADDR_BITS) - 1)
-
-/* indicates an init that should be applied to any phase ID */
-#define ANY_PHASE_ID 0xffff
-
-/* init pattern size in bytes */
-#define INIT_PATTERN_SIZE_BITS 4
-#define MAX_INIT_PATTERN_SIZE BIT(INIT_PATTERN_SIZE_BITS)
-
-/* Max size in dwords of a zipped array */
-#define MAX_ZIPPED_SIZE 8192
-
-/* Global PXP window */
-#define NUM_OF_PXP_WIN 19
-#define PXP_WIN_DWORD_SIZE_BITS 10
-#define PXP_WIN_DWORD_SIZE BIT(PXP_WIN_DWORD_SIZE_BITS)
-#define PXP_WIN_BYTE_SIZE_BITS (PXP_WIN_DWORD_SIZE_BITS + 2)
-#define PXP_WIN_BYTE_SIZE (PXP_WIN_DWORD_SIZE * 4)
-
-/********************************* GRC Dump **********************************/
-
-/* width of GRC dump register sequence length in bits */
-#define DUMP_SEQ_LEN_BITS 8
-#define DUMP_SEQ_LEN_MAX_VAL ((1 << DUMP_SEQ_LEN_BITS) - 1)
-
-/* width of GRC dump memory length in bits */
-#define DUMP_MEM_LEN_BITS 18
-#define DUMP_MEM_LEN_MAX_VAL ((1 << DUMP_MEM_LEN_BITS) - 1)
-
-/* width of register type ID in bits */
-#define REG_TYPE_ID_BITS 6
-#define REG_TYPE_ID_MAX_VAL ((1 << REG_TYPE_ID_BITS) - 1)
-
-/* width of block ID in bits */
-#define BLOCK_ID_BITS 8
-#define BLOCK_ID_MAX_VAL ((1 << BLOCK_ID_BITS) - 1)
-
-/******************************** Idle Check *********************************/
-
-/* max number of idle check predicate immediates */
-#define MAX_IDLE_CHK_PRED_IMM 3
-
-/* max number of idle check argument registers */
-#define MAX_IDLE_CHK_READ_REGS 3
-
-/* max number of idle check loops */
-#define MAX_IDLE_CHK_LOOPS 0x10000
-
-/* max idle check address increment */
-#define MAX_IDLE_CHK_INCREMENT 0x10000
-
-/* indicates an undefined idle check line index */
-#define IDLE_CHK_UNDEFINED_LINE_IDX 0xffffff
-
-/* max number of register values following the idle check header */
-#define IDLE_CHK_MAX_DUMP_REGS 2
-
-/* arguments for IDLE_CHK_MACRO_TYPE_QM_RD_WR */
-#define IDLE_CHK_QM_RD_WR_PTR 0
-#define IDLE_CHK_QM_RD_WR_BANK 1
-
-/**************************************/
-/* HSI Functions constants and macros */
-/**************************************/
-
-/* Number of VLAN priorities */
-#define NUM_OF_VLAN_PRIORITIES 8
-
-/* the MCP Trace metadata signature is duplicated in the perl script that
- * generates the NVRAM images.
- */
-#define MCP_TRACE_META_IMAGE_SIGNATURE 0x669955aa
-
/* Binary buffer header */
struct bin_buffer_hdr {
- u32 offset;
- u32 length /* buffer length in bytes */;
+ __le32 offset;
+ __le32 length;
};
-/* binary buffer types */
-enum bin_buffer_type {
- BIN_BUF_FW_VER_INFO /* fw_ver_info struct */,
- BIN_BUF_INIT_CMD /* init commands */,
- BIN_BUF_INIT_VAL /* init data */,
- BIN_BUF_INIT_MODE_TREE /* init modes tree */,
- BIN_BUF_IRO /* internal RAM offsets array */,
- MAX_BIN_BUFFER_TYPE
-};
-
-/* Chip IDs */
-enum chip_ids {
- CHIP_BB_A0 /* BB A0 chip ID */,
- CHIP_BB_B0 /* BB B0 chip ID */,
- CHIP_K2 /* AH chip ID */,
- MAX_CHIP_IDS
+/* binary init buffer types */
+enum bin_init_buffer_type {
+ BIN_BUF_FW_VER_INFO,
+ BIN_BUF_INIT_CMD,
+ BIN_BUF_INIT_VAL,
+ BIN_BUF_INIT_MODE_TREE,
+ BIN_BUF_IRO,
+ MAX_BIN_INIT_BUFFER_TYPE
};
+/* init array header: raw */
struct init_array_raw_hdr {
__le32 data;
-#define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF
-#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT 0
-#define INIT_ARRAY_RAW_HDR_PARAMS_MASK 0xFFFFFFF /* init array params */
-#define INIT_ARRAY_RAW_HDR_PARAMS_SHIFT 4
+#define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_RAW_HDR_PARAMS_MASK 0xFFFFFFF
+#define INIT_ARRAY_RAW_HDR_PARAMS_SHIFT 4
};
+/* init array header: standard */
struct init_array_standard_hdr {
__le32 data;
-#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK 0xF
-#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0
-#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK 0xFFFFFFF
-#define INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT 4
+#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK 0xFFFFFFF
+#define INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT 4
};
+/* init array header: zipped */
struct init_array_zipped_hdr {
__le32 data;
-#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK 0xF
-#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT 0
-#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK 0xFFFFFFF
-#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_SHIFT 4
+#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK 0xFFFFFFF
+#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_SHIFT 4
};
+/* init array header: pattern */
struct init_array_pattern_hdr {
__le32 data;
-#define INIT_ARRAY_PATTERN_HDR_TYPE_MASK 0xF
-#define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT 0
-#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK 0xF
-#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_SHIFT 4
-#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_MASK 0xFFFFFF
-#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_SHIFT 8
+#define INIT_ARRAY_PATTERN_HDR_TYPE_MASK 0xF
+#define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT 0
+#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK 0xF
+#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_SHIFT 4
+#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_MASK 0xFFFFFF
+#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_SHIFT 8
};
+/* init array header union */
union init_array_hdr {
- struct init_array_raw_hdr raw /* raw init array header */;
- struct init_array_standard_hdr standard;
- struct init_array_zipped_hdr zipped /* zipped init array header */;
- struct init_array_pattern_hdr pattern /* pattern init array header */;
+ struct init_array_raw_hdr raw;
+ struct init_array_standard_hdr standard;
+ struct init_array_zipped_hdr zipped;
+ struct init_array_pattern_hdr pattern;
};
+/* init array types */
enum init_array_types {
- INIT_ARR_STANDARD /* standard init array */,
- INIT_ARR_ZIPPED /* zipped init array */,
- INIT_ARR_PATTERN /* a repeated pattern */,
+ INIT_ARR_STANDARD,
+ INIT_ARR_ZIPPED,
+ INIT_ARR_PATTERN,
MAX_INIT_ARRAY_TYPES
};
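All four init array headers keep the array type in the low 4 bits of the same dword, so a parser can decode the type from the raw header and only then interpret the remaining 28 bits. A sketch for the standard case (byte-order conversion elided; the caller is assumed to pass a host-order dword):

#include <stdint.h>

#define INIT_ARRAY_RAW_HDR_TYPE_MASK		0xF
#define INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT	4

enum { INIT_ARR_STANDARD, INIT_ARR_ZIPPED, INIT_ARR_PATTERN };

/* Size in dwords of a standard init array, or 0 for the other types,
 * which need their own decode (zipped size, or pattern size/repetitions).
 */
static uint32_t init_array_std_dwords(uint32_t hdr)
{
	if ((hdr & INIT_ARRAY_RAW_HDR_TYPE_MASK) != INIT_ARR_STANDARD)
		return 0;
	return hdr >> INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT;
}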
/* init operation: callback */
struct init_callback_op {
- __le32 op_data;
-#define INIT_CALLBACK_OP_OP_MASK 0xF
-#define INIT_CALLBACK_OP_OP_SHIFT 0
-#define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF
-#define INIT_CALLBACK_OP_RESERVED_SHIFT 4
- __le16 callback_id /* Callback ID */;
- __le16 block_id /* Blocks ID */;
+ __le32 op_data;
+#define INIT_CALLBACK_OP_OP_MASK 0xF
+#define INIT_CALLBACK_OP_OP_SHIFT 0
+#define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF
+#define INIT_CALLBACK_OP_RESERVED_SHIFT 4
+ __le16 callback_id;
+ __le16 block_id;
};
/* init operation: delay */
struct init_delay_op {
- __le32 op_data;
-#define INIT_DELAY_OP_OP_MASK 0xF
-#define INIT_DELAY_OP_OP_SHIFT 0
-#define INIT_DELAY_OP_RESERVED_MASK 0xFFFFFFF
-#define INIT_DELAY_OP_RESERVED_SHIFT 4
- __le32 delay /* delay in us */;
+ __le32 op_data;
+#define INIT_DELAY_OP_OP_MASK 0xF
+#define INIT_DELAY_OP_OP_SHIFT 0
+#define INIT_DELAY_OP_RESERVED_MASK 0xFFFFFFF
+#define INIT_DELAY_OP_RESERVED_SHIFT 4
+ __le32 delay;
};
/* init operation: if_mode */
struct init_if_mode_op {
__le32 op_data;
-#define INIT_IF_MODE_OP_OP_MASK 0xF
-#define INIT_IF_MODE_OP_OP_SHIFT 0
-#define INIT_IF_MODE_OP_RESERVED1_MASK 0xFFF
-#define INIT_IF_MODE_OP_RESERVED1_SHIFT 4
-#define INIT_IF_MODE_OP_CMD_OFFSET_MASK 0xFFFF
-#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16
- __le16 reserved2;
- __le16 modes_buf_offset;
+#define INIT_IF_MODE_OP_OP_MASK 0xF
+#define INIT_IF_MODE_OP_OP_SHIFT 0
+#define INIT_IF_MODE_OP_RESERVED1_MASK 0xFFF
+#define INIT_IF_MODE_OP_RESERVED1_SHIFT 4
+#define INIT_IF_MODE_OP_CMD_OFFSET_MASK 0xFFFF
+#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16
+ __le16 reserved2;
+ __le16 modes_buf_offset;
};
-/* init operation: if_phase */
+/* init operation: if_phase */
struct init_if_phase_op {
__le32 op_data;
-#define INIT_IF_PHASE_OP_OP_MASK 0xF
-#define INIT_IF_PHASE_OP_OP_SHIFT 0
-#define INIT_IF_PHASE_OP_DMAE_ENABLE_MASK 0x1
-#define INIT_IF_PHASE_OP_DMAE_ENABLE_SHIFT 4
-#define INIT_IF_PHASE_OP_RESERVED1_MASK 0x7FF
-#define INIT_IF_PHASE_OP_RESERVED1_SHIFT 5
-#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF
-#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16
+#define INIT_IF_PHASE_OP_OP_MASK 0xF
+#define INIT_IF_PHASE_OP_OP_SHIFT 0
+#define INIT_IF_PHASE_OP_DMAE_ENABLE_MASK 0x1
+#define INIT_IF_PHASE_OP_DMAE_ENABLE_SHIFT 4
+#define INIT_IF_PHASE_OP_RESERVED1_MASK 0x7FF
+#define INIT_IF_PHASE_OP_RESERVED1_SHIFT 5
+#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF
+#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16
__le32 phase_data;
-#define INIT_IF_PHASE_OP_PHASE_MASK 0xFF /* Init phase */
-#define INIT_IF_PHASE_OP_PHASE_SHIFT 0
-#define INIT_IF_PHASE_OP_RESERVED2_MASK 0xFF
-#define INIT_IF_PHASE_OP_RESERVED2_SHIFT 8
-#define INIT_IF_PHASE_OP_PHASE_ID_MASK 0xFFFF /* Init phase ID */
-#define INIT_IF_PHASE_OP_PHASE_ID_SHIFT 16
+#define INIT_IF_PHASE_OP_PHASE_MASK 0xFF
+#define INIT_IF_PHASE_OP_PHASE_SHIFT 0
+#define INIT_IF_PHASE_OP_RESERVED2_MASK 0xFF
+#define INIT_IF_PHASE_OP_RESERVED2_SHIFT 8
+#define INIT_IF_PHASE_OP_PHASE_ID_MASK 0xFFFF
+#define INIT_IF_PHASE_OP_PHASE_ID_SHIFT 16
};
/* init mode operators */
enum init_mode_ops {
- INIT_MODE_OP_NOT /* init mode not operator */,
- INIT_MODE_OP_OR /* init mode or operator */,
- INIT_MODE_OP_AND /* init mode and operator */,
+ INIT_MODE_OP_NOT,
+ INIT_MODE_OP_OR,
+ INIT_MODE_OP_AND,
MAX_INIT_MODE_OPS
};
/* init operation: raw */
struct init_raw_op {
- __le32 op_data;
-#define INIT_RAW_OP_OP_MASK 0xF
-#define INIT_RAW_OP_OP_SHIFT 0
-#define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF /* init param 1 */
-#define INIT_RAW_OP_PARAM1_SHIFT 4
- __le32 param2 /* Init param 2 */;
+ __le32 op_data;
+#define INIT_RAW_OP_OP_MASK 0xF
+#define INIT_RAW_OP_OP_SHIFT 0
+#define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF
+#define INIT_RAW_OP_PARAM1_SHIFT 4
+ __le32 param2;
};
/* init array params */
struct init_op_array_params {
- __le16 size /* array size in dwords */;
- __le16 offset /* array start offset in dwords */;
+ __le16 size;
+ __le16 offset;
};
/* Write init operation arguments */
union init_write_args {
- __le32 inline_val;
- __le32 zeros_count;
- __le32 array_offset;
- struct init_op_array_params runtime;
+ __le32 inline_val;
+ __le32 zeros_count;
+ __le32 array_offset;
+ struct init_op_array_params runtime;
};
/* init operation: write */
struct init_write_op {
__le32 data;
-#define INIT_WRITE_OP_OP_MASK 0xF
-#define INIT_WRITE_OP_OP_SHIFT 0
-#define INIT_WRITE_OP_SOURCE_MASK 0x7
-#define INIT_WRITE_OP_SOURCE_SHIFT 4
-#define INIT_WRITE_OP_RESERVED_MASK 0x1
-#define INIT_WRITE_OP_RESERVED_SHIFT 7
-#define INIT_WRITE_OP_WIDE_BUS_MASK 0x1
-#define INIT_WRITE_OP_WIDE_BUS_SHIFT 8
-#define INIT_WRITE_OP_ADDRESS_MASK 0x7FFFFF
-#define INIT_WRITE_OP_ADDRESS_SHIFT 9
- union init_write_args args /* Write init operation arguments */;
+#define INIT_WRITE_OP_OP_MASK 0xF
+#define INIT_WRITE_OP_OP_SHIFT 0
+#define INIT_WRITE_OP_SOURCE_MASK 0x7
+#define INIT_WRITE_OP_SOURCE_SHIFT 4
+#define INIT_WRITE_OP_RESERVED_MASK 0x1
+#define INIT_WRITE_OP_RESERVED_SHIFT 7
+#define INIT_WRITE_OP_WIDE_BUS_MASK 0x1
+#define INIT_WRITE_OP_WIDE_BUS_SHIFT 8
+#define INIT_WRITE_OP_ADDRESS_MASK 0x7FFFFF
+#define INIT_WRITE_OP_ADDRESS_SHIFT 9
+ union init_write_args args;
};
/* init operation: read */
struct init_read_op {
__le32 op_data;
-#define INIT_READ_OP_OP_MASK 0xF
-#define INIT_READ_OP_OP_SHIFT 0
-#define INIT_READ_OP_POLL_TYPE_MASK 0xF
-#define INIT_READ_OP_POLL_TYPE_SHIFT 4
-#define INIT_READ_OP_RESERVED_MASK 0x1
-#define INIT_READ_OP_RESERVED_SHIFT 8
-#define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF
-#define INIT_READ_OP_ADDRESS_SHIFT 9
+#define INIT_READ_OP_OP_MASK 0xF
+#define INIT_READ_OP_OP_SHIFT 0
+#define INIT_READ_OP_POLL_TYPE_MASK 0xF
+#define INIT_READ_OP_POLL_TYPE_SHIFT 4
+#define INIT_READ_OP_RESERVED_MASK 0x1
+#define INIT_READ_OP_RESERVED_SHIFT 8
+#define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF
+#define INIT_READ_OP_ADDRESS_SHIFT 9
__le32 expected_val;
};
/* Init operations union */
union init_op {
- struct init_raw_op raw /* raw init operation */;
- struct init_write_op write /* write init operation */;
- struct init_read_op read /* read init operation */;
- struct init_if_mode_op if_mode /* if_mode init operation */;
- struct init_if_phase_op if_phase /* if_phase init operation */;
- struct init_callback_op callback /* callback init operation */;
- struct init_delay_op delay /* delay init operation */;
+ struct init_raw_op raw;
+ struct init_write_op write;
+ struct init_read_op read;
+ struct init_if_mode_op if_mode;
+ struct init_if_phase_op if_phase;
+ struct init_callback_op callback;
+ struct init_delay_op delay;
};
/* Init command operation types */
enum init_op_types {
- INIT_OP_READ /* GRC read init command */,
- INIT_OP_WRITE /* GRC write init command */,
+ INIT_OP_READ,
+ INIT_OP_WRITE,
INIT_OP_IF_MODE,
INIT_OP_IF_PHASE,
- INIT_OP_DELAY /* delay init command */,
- INIT_OP_CALLBACK /* callback init command */,
+ INIT_OP_DELAY,
+ INIT_OP_CALLBACK,
MAX_INIT_OP_TYPES
};
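/* A hedged dispatch sketch: every member of union init_op above begins
 * with a dword whose low four bits (INIT_RAW_OP_OP, shift 0) carry an
 * init_op_types value, so a command processor can switch on the raw
 * view; the function name is illustrative, not part of the driver.
 */
static int ex_run_init_op(const union init_op *op)
{
	u32 data = le32_to_cpu(op->raw.op_data);

	switch (data & INIT_RAW_OP_OP_MASK) {
	case INIT_OP_WRITE:
		/* decode op->write and issue the register write */
		return 0;
	case INIT_OP_READ:
		/* decode op->read and poll per INIT_READ_OP_POLL_TYPE */
		return 0;
	default:
		return -EINVAL;
	}
}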
+/* init polling types */
enum init_poll_types {
- INIT_POLL_NONE /* No polling */,
- INIT_POLL_EQ /* init value is included in the init command */,
- INIT_POLL_OR /* init value is all zeros */,
- INIT_POLL_AND /* init value is an array of values */,
+ INIT_POLL_NONE,
+ INIT_POLL_EQ,
+ INIT_POLL_OR,
+ INIT_POLL_AND,
MAX_INIT_POLL_TYPES
};
/* init source types */
enum init_source_types {
- INIT_SRC_INLINE /* init value is included in the init command */,
- INIT_SRC_ZEROS /* init value is all zeros */,
- INIT_SRC_ARRAY /* init value is an array of values */,
- INIT_SRC_RUNTIME /* init value is provided during runtime */,
+ INIT_SRC_INLINE,
+ INIT_SRC_ZEROS,
+ INIT_SRC_ARRAY,
+ INIT_SRC_RUNTIME,
MAX_INIT_SOURCE_TYPES
};
/* Internal RAM Offsets macro data */
struct iro {
- u32 base /* RAM field offset */;
- u16 m1 /* multiplier 1 */;
- u16 m2 /* multiplier 2 */;
- u16 m3 /* multiplier 3 */;
- u16 size /* RAM field size */;
-};
-
-/* QM per-port init parameters */
-struct init_qm_port_params {
- u8 active /* Indicates if this port is active */;
- u8 num_active_phys_tcs;
- u16 num_pbf_cmd_lines;
- u16 num_btb_blocks;
- __le16 reserved;
+ __le32 base;
+ __le16 m1;
+ __le16 m2;
+ __le16 m3;
+ __le16 size;
};
-/* QM per-PQ init parameters */
-struct init_qm_pq_params {
- u8 vport_id /* VPORT ID */;
- u8 tc_id /* TC ID */;
- u8 wrr_group /* WRR group */;
- u8 reserved;
-};
+/**
+ * @brief qed_dbg_print_attn - Prints attention register values in the
+ *	specified results struct.
+ *
+ * @param p_hwfn
+ * @param results - Pointer to the attention read results
+ *
+ * @return error if the version wasn't set; otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn,
+ struct dbg_attn_block_result *results);
-/* QM per-vport init parameters */
-struct init_qm_vport_params {
- u32 vport_rl;
- u16 vport_wfq;
- u16 first_tx_pq_id[NUM_OF_TCS];
-};
+#define MAX_NAME_LEN 16
/* Win 2 */
#define GTT_BAR0_MAP_REG_IGU_CMD \
0x00f000UL
+
/* Win 3 */
#define GTT_BAR0_MAP_REG_TSDM_RAM \
0x010000UL
+
/* Win 4 */
#define GTT_BAR0_MAP_REG_MSDM_RAM \
0x011000UL
+
/* Win 5 */
#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 \
0x012000UL
+
/* Win 6 */
#define GTT_BAR0_MAP_REG_USDM_RAM \
0x013000UL
+
/* Win 7 */
#define GTT_BAR0_MAP_REG_USDM_RAM_1024 \
0x014000UL
+
/* Win 8 */
#define GTT_BAR0_MAP_REG_USDM_RAM_2048 \
0x015000UL
+
/* Win 9 */
#define GTT_BAR0_MAP_REG_XSDM_RAM \
0x016000UL
+
/* Win 10 */
#define GTT_BAR0_MAP_REG_YSDM_RAM \
0x017000UL
+
/* Win 11 */
#define GTT_BAR0_MAP_REG_PSDM_RAM \
0x018000UL
@@ -1483,772 +1963,718 @@ struct init_qm_vport_params {
* Returns the required host memory size in 4KB units.
* Must be called before all QM init HSI functions.
*
- * @param pf_id - physical function ID
- * @param num_pf_cids - number of connections used by this PF
- * @param num_vf_cids - number of connections used by VFs of this PF
- * @param num_tids - number of tasks used by this PF
- * @param num_pf_pqs - number of PQs used by this PF
- * @param num_vf_pqs - number of PQs used by VFs of this PF
+ * @param pf_id - physical function ID
+ * @param num_pf_cids - number of connections used by this PF
+ * @param num_vf_cids - number of connections used by VFs of this PF
+ * @param num_tids - number of tasks used by this PF
+ * @param num_pf_pqs - number of PQs used by this PF
+ * @param num_vf_pqs - number of PQs used by VFs of this PF
*
* @return The required host memory size in 4KB units.
*/
-u32 qed_qm_pf_mem_size(u8 pf_id,
- u32 num_pf_cids,
- u32 num_vf_cids,
- u32 num_tids,
- u16 num_pf_pqs,
- u16 num_vf_pqs);
+u32 qed_qm_pf_mem_size(u8 pf_id,
+ u32 num_pf_cids,
+ u32 num_vf_cids,
+ u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs);
struct qed_qm_common_rt_init_params {
- u8 max_ports_per_engine;
- u8 max_phys_tcs_per_port;
- bool pf_rl_en;
- bool pf_wfq_en;
- bool vport_rl_en;
- bool vport_wfq_en;
- struct init_qm_port_params *port_params;
+ u8 max_ports_per_engine;
+ u8 max_phys_tcs_per_port;
+ bool pf_rl_en;
+ bool pf_wfq_en;
+ bool vport_rl_en;
+ bool vport_wfq_en;
+ struct init_qm_port_params *port_params;
};
+int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
+ struct qed_qm_common_rt_init_params *p_params);
+
+struct qed_qm_pf_rt_init_params {
+ u8 port_id;
+ u8 pf_id;
+ u8 max_phys_tcs_per_port;
+ bool is_first_pf;
+ u32 num_pf_cids;
+ u32 num_vf_cids;
+ u32 num_tids;
+ u16 start_pq;
+ u16 num_pf_pqs;
+ u16 num_vf_pqs;
+ u8 start_vport;
+ u8 num_vports;
+ u8 pf_wfq;
+ u32 pf_rl;
+ struct init_qm_pq_params *pq_params;
+ struct init_qm_vport_params *vport_params;
+};
+
+int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_qm_pf_rt_init_params *p_params);
+
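/* A hedged usage sketch (the ex_* name and all numeric values are
 * illustrative, not a real configuration): fill the runtime-init
 * parameter block declared above, then apply it through the PTT window.
 */
static int ex_qm_pf_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			 struct init_qm_pq_params *pq_params,
			 struct init_qm_vport_params *vport_params)
{
	struct qed_qm_pf_rt_init_params params = {
		.port_id		= 0,
		.pf_id			= 0,
		.max_phys_tcs_per_port	= 4,
		.is_first_pf		= true,
		.num_pf_cids		= 64,
		.num_tids		= 64,
		.start_pq		= 0,
		.num_pf_pqs		= 8,
		.pq_params		= pq_params,
		.vport_params		= vport_params,
	};

	return qed_qm_pf_rt_init(p_hwfn, p_ptt, &params);
}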
/**
- * @brief qed_qm_common_rt_init - Prepare QM runtime init values for the
- * engine phase.
+ * @brief qed_init_pf_wfq - Initializes the WFQ weight of the specified PF
*
* @param p_hwfn
- * @param max_ports_per_engine - max number of ports per engine in HW
- * @param max_phys_tcs_per_port - max number of physical TCs per port in HW
- * @param pf_rl_en - enable per-PF rate limiters
- * @param pf_wfq_en - enable per-PF WFQ
- * @param vport_rl_en - enable per-VPORT rate limiters
- * @param vport_wfq_en - enable per-VPORT WFQ
- * @param port_params - array of size MAX_NUM_PORTS with
- * arameters for each port
+ * @param p_ptt - ptt window used for writing the registers
+ * @param pf_id - PF ID
+ * @param pf_wfq - WFQ weight. Must be non-zero.
*
* @return 0 on success, -1 on error.
*/
-int qed_qm_common_rt_init(
- struct qed_hwfn *p_hwfn,
- struct qed_qm_common_rt_init_params *p_params);
-
-struct qed_qm_pf_rt_init_params {
- u8 port_id;
- u8 pf_id;
- u8 max_phys_tcs_per_port;
- bool is_first_pf;
- u32 num_pf_cids;
- u32 num_vf_cids;
- u32 num_tids;
- u16 start_pq;
- u16 num_pf_pqs;
- u16 num_vf_pqs;
- u8 start_vport;
- u8 num_vports;
- u8 pf_wfq;
- u32 pf_rl;
- struct init_qm_pq_params *pq_params;
- struct init_qm_vport_params *vport_params;
-};
-
-int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_qm_pf_rt_init_params *p_params);
+int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq);
/**
- * @brief qed_init_pf_rl Initializes the rate limit of the specified PF
+ * @brief qed_init_pf_rl - Initializes the rate limit of the specified PF
*
* @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers
- * @param pf_id - PF ID
- * @param pf_rl - rate limit in Mb/sec units
+ * @param p_ptt - ptt window used for writing the registers
+ * @param pf_id - PF ID
+ * @param pf_rl - rate limit in Mb/sec units
*
* @return 0 on success, -1 on error.
*/
-int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u8 pf_id,
- u32 pf_rl);
+int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl);
/**
- * @brief qed_init_vport_rl Initializes the rate limit of the specified VPORT
+ * @brief qed_init_vport_wfq - Initializes the WFQ weight of the specified VPORT
*
* @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers
- * @param vport_id - VPORT ID
- * @param vport_rl - rate limit in Mb/sec units
+ * @param p_ptt - ptt window used for writing the registers
+ * @param first_tx_pq_id - An array containing the first Tx PQ ID
+ *	associated with the VPORT for each TC; this array is filled by
+ *	qed_qm_pf_rt_init.
+ * @param vport_wfq - WFQ weight. Must be non-zero.
*
* @return 0 on success, -1 on error.
*/
+int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq);
-int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u8 vport_id,
- u32 vport_rl);
+/**
+ * @brief qed_init_vport_rl - Initializes the rate limit of the specified VPORT
+ *
+ * @param p_hwfn
+ * @param p_ptt - ptt window used for writing the registers
+ * @param vport_id - VPORT ID
+ * @param vport_rl - rate limit in Mb/sec units
+ *
+ * @return 0 on success, -1 on error.
+ */
+int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl);
/**
* @brief qed_send_qm_stop_cmd Sends a stop command to the QM
*
* @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers
+ * @param p_ptt - ptt window used for writing the registers
* @param is_release_cmd - true for release, false for stop.
- * @param is_tx_pq - true for Tx PQs, false for Other PQs.
- * @param start_pq - first PQ ID to stop
- * @param num_pqs - Number of PQs to stop, starting from start_pq.
+ * @param is_tx_pq - true for Tx PQs, false for Other PQs.
+ * @param start_pq - first PQ ID to stop
+ * @param num_pqs - Number of PQs to stop, starting from start_pq.
+ *
+ * @return bool, true if successful, false if timeout occurred while
+ *	waiting for QM command done.
+ */
+bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool is_release_cmd,
+ bool is_tx_pq, u16 start_pq, u16 num_pqs);
+
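/* A hedged flow sketch based on the parameter documentation above:
 * stop a range of Tx PQs, then release them; the function name and the
 * range arguments are illustrative.
 */
static bool ex_stop_tx_pqs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			   u16 start_pq, u16 num_pqs)
{
	if (!qed_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
				  start_pq, num_pqs))
		return false;	/* timed out waiting for QM command done */

	return qed_send_qm_stop_cmd(p_hwfn, p_ptt, true, true,
				    start_pq, num_pqs);
}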
+/**
+ * @brief qed_set_vxlan_dest_port - initializes vxlan tunnel destination
+ *	udp port
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param dest_port - vxlan destination udp port.
+ */
+void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 dest_port);
+
+/**
+ * @brief qed_set_vxlan_enable - enable or disable VXLAN tunnel in HW
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param vxlan_enable - vxlan enable flag.
+ */
+void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool vxlan_enable);
+
+/**
+ * @brief qed_set_gre_enable - enable or disable GRE tunnel in HW
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param eth_gre_enable - eth GRE enable flag.
+ * @param ip_gre_enable - IP GRE enable flag.
+ */
+void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool eth_gre_enable, bool ip_gre_enable);
+
+/**
+ * @brief qed_set_geneve_dest_port - initializes geneve tunnel destination
+ *	udp port
*
- * @return bool, true if successful, false if timeout occurred while waiting
- * for QM command done.
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param dest_port - geneve destination udp port.
*/
+void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 dest_port);
-bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- bool is_release_cmd,
- bool is_tx_pq,
- u16 start_pq,
- u16 num_pqs);
-
-/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
-#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
-#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
-/* Tstorm port statistics */
-#define TSTORM_PORT_STAT_OFFSET(port_id) (IRO[1].base + ((port_id) * IRO[1].m1))
-#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
-/* Tstorm ll2 port statistics */
-#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \
- (IRO[2].base + ((port_id) * IRO[2].m1))
-#define TSTORM_LL2_PORT_STAT_SIZE (IRO[2].size)
-/* Ustorm VF-PF Channel ready flag */
-#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
- (IRO[3].base + ((vf_id) * IRO[3].m1))
-#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size)
-/* Ustorm Final flr cleanup ack */
-#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) (IRO[4].base + ((pf_id) * IRO[4].m1))
-#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size)
-/* Ustorm Event ring consumer */
-#define USTORM_EQE_CONS_OFFSET(pf_id) (IRO[5].base + ((pf_id) * IRO[5].m1))
-#define USTORM_EQE_CONS_SIZE (IRO[5].size)
-/* Ustorm Common Queue ring consumer */
-#define USTORM_COMMON_QUEUE_CONS_OFFSET(global_queue_id) \
- (IRO[6].base + ((global_queue_id) * IRO[6].m1))
-#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[6].size)
-/* Xstorm Integration Test Data */
-#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[7].base)
-#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[7].size)
-/* Ystorm Integration Test Data */
-#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[8].base)
-#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[8].size)
-/* Pstorm Integration Test Data */
-#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base)
-#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size)
-/* Tstorm Integration Test Data */
-#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base)
-#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size)
-/* Mstorm Integration Test Data */
-#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base)
-#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size)
-/* Ustorm Integration Test Data */
-#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base)
-#define USTORM_INTEG_TEST_DATA_SIZE (IRO[12].size)
-/* Tstorm producers */
-#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \
- (IRO[13].base + ((core_rx_queue_id) * IRO[13].m1))
-#define TSTORM_LL2_RX_PRODS_SIZE (IRO[13].size)
-/* Tstorm LightL2 queue statistics */
-#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
- (IRO[14].base + ((core_rx_queue_id) * IRO[14].m1))
-#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[14].size)
-/* Ustorm LiteL2 queue statistics */
-#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
- (IRO[15].base + ((core_rx_queue_id) * IRO[15].m1))
-#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[15].size)
-/* Pstorm LiteL2 queue statistics */
-#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \
- (IRO[16].base + ((core_tx_stats_id) * IRO[16].m1))
-#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[16].size)
-/* Mstorm queue statistics */
-#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[17].base + ((stat_counter_id) * IRO[17].m1))
-#define MSTORM_QUEUE_STAT_SIZE (IRO[17].size)
-/* Mstorm producers */
-#define MSTORM_PRODS_OFFSET(queue_id) (IRO[18].base + ((queue_id) * IRO[18].m1))
-#define MSTORM_PRODS_SIZE (IRO[18].size)
-/* TPA agregation timeout in us resolution (on ASIC) */
-#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[19].base)
-#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[19].size)
-/* Ustorm queue statistics */
-#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[20].base + ((stat_counter_id) * IRO[20].m1))
-#define USTORM_QUEUE_STAT_SIZE (IRO[20].size)
-/* Ustorm queue zone */
-#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
- (IRO[21].base + ((queue_id) * IRO[21].m1))
-#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[21].size)
-/* Pstorm queue statistics */
-#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[22].base + ((stat_counter_id) * IRO[22].m1))
-#define PSTORM_QUEUE_STAT_SIZE (IRO[22].size)
-/* Tstorm last parser message */
-#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[23].base)
-#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[23].size)
-/* Tstorm Eth limit Rx rate */
-#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) (IRO[24].base + ((pf_id) * IRO[24].m1))
-#define ETH_RX_RATE_LIMIT_SIZE (IRO[24].size)
-/* Ystorm queue zone */
-#define YSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
- (IRO[25].base + ((queue_id) * IRO[25].m1))
-#define YSTORM_ETH_QUEUE_ZONE_SIZE (IRO[25].size)
-/* Ystorm cqe producer */
-#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \
- (IRO[26].base + ((rss_id) * IRO[26].m1))
-#define YSTORM_TOE_CQ_PROD_SIZE (IRO[26].size)
-/* Ustorm cqe producer */
-#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \
- (IRO[27].base + ((rss_id) * IRO[27].m1))
-#define USTORM_TOE_CQ_PROD_SIZE (IRO[27].size)
-/* Ustorm grq producer */
-#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \
- (IRO[28].base + ((pf_id) * IRO[28].m1))
-#define USTORM_TOE_GRQ_PROD_SIZE (IRO[28].size)
-/* Tstorm cmdq-cons of given command queue-id */
-#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \
- (IRO[29].base + ((cmdq_queue_id) * IRO[29].m1))
-#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[29].size)
-/* Mstorm rq-cons of given queue-id */
-#define MSTORM_SCSI_RQ_CONS_OFFSET(rq_queue_id) \
- (IRO[30].base + ((rq_queue_id) * IRO[30].m1))
-#define MSTORM_SCSI_RQ_CONS_SIZE (IRO[30].size)
-/* Mstorm bdq-external-producer of given BDQ function ID, BDqueue-id */
-#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
- (IRO[31].base + ((func_id) * IRO[31].m1) + ((bdq_id) * IRO[31].m2))
-#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[31].size)
-/* Tstorm (reflects M-Storm) bdq-external-producer of given fn ID, BDqueue-id */
-#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \
- (IRO[32].base + ((func_id) * IRO[32].m1) + ((bdq_id) * IRO[32].m2))
-#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[32].size)
-/* Tstorm iSCSI RX stats */
-#define TSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
- (IRO[33].base + ((pf_id) * IRO[33].m1))
-#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[33].size)
-/* Mstorm iSCSI RX stats */
-#define MSTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
- (IRO[34].base + ((pf_id) * IRO[34].m1))
-#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[34].size)
-/* Ustorm iSCSI RX stats */
-#define USTORM_ISCSI_RX_STATS_OFFSET(pf_id) \
- (IRO[35].base + ((pf_id) * IRO[35].m1))
-#define USTORM_ISCSI_RX_STATS_SIZE (IRO[35].size)
-/* Xstorm iSCSI TX stats */
-#define XSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
- (IRO[36].base + ((pf_id) * IRO[36].m1))
-#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[36].size)
-/* Ystorm iSCSI TX stats */
-#define YSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
- (IRO[37].base + ((pf_id) * IRO[37].m1))
-#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[37].size)
-/* Pstorm iSCSI TX stats */
-#define PSTORM_ISCSI_TX_STATS_OFFSET(pf_id) \
- (IRO[38].base + ((pf_id) * IRO[38].m1))
-#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[38].size)
-/* Tstorm FCoE RX stats */
-#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
- (IRO[39].base + ((pf_id) * IRO[39].m1))
-#define TSTORM_FCOE_RX_STATS_SIZE (IRO[39].size)
-/* Mstorm FCoE RX stats */
-#define MSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
- (IRO[40].base + ((pf_id) * IRO[40].m1))
-#define MSTORM_FCOE_RX_STATS_SIZE (IRO[40].size)
-/* Pstorm FCoE TX stats */
-#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \
- (IRO[41].base + ((pf_id) * IRO[41].m1))
-#define PSTORM_FCOE_TX_STATS_SIZE (IRO[41].size)
-/* Pstorm RoCE statistics */
-#define PSTORM_ROCE_STAT_OFFSET(stat_counter_id) \
- (IRO[42].base + ((stat_counter_id) * IRO[42].m1))
-#define PSTORM_ROCE_STAT_SIZE (IRO[42].size)
-/* Tstorm RoCE statistics */
-#define TSTORM_ROCE_STAT_OFFSET(stat_counter_id) \
- (IRO[43].base + ((stat_counter_id) * IRO[43].m1))
-#define TSTORM_ROCE_STAT_SIZE (IRO[43].size)
-
-static const struct iro iro_arr[44] = {
- { 0x10, 0x0, 0x0, 0x0, 0x8 },
- { 0x47c8, 0x60, 0x0, 0x0, 0x60 },
- { 0x5e30, 0x20, 0x0, 0x0, 0x20 },
- { 0x510, 0x8, 0x0, 0x0, 0x4 },
- { 0x490, 0x8, 0x0, 0x0, 0x4 },
- { 0x10, 0x8, 0x0, 0x0, 0x2 },
- { 0x90, 0x8, 0x0, 0x0, 0x2 },
- { 0x4940, 0x0, 0x0, 0x0, 0x78 },
- { 0x3de0, 0x0, 0x0, 0x0, 0x78 },
- { 0x2998, 0x0, 0x0, 0x0, 0x78 },
- { 0x4750, 0x0, 0x0, 0x0, 0x78 },
- { 0x56d0, 0x0, 0x0, 0x0, 0x78 },
- { 0x7e50, 0x0, 0x0, 0x0, 0x78 },
- { 0x100, 0x8, 0x0, 0x0, 0x8 },
- { 0x5c10, 0x10, 0x0, 0x0, 0x10 },
- { 0xb508, 0x30, 0x0, 0x0, 0x30 },
- { 0x95c0, 0x30, 0x0, 0x0, 0x30 },
- { 0x58a0, 0x40, 0x0, 0x0, 0x40 },
- { 0x200, 0x10, 0x0, 0x0, 0x8 },
- { 0xa230, 0x0, 0x0, 0x0, 0x4 },
- { 0x8058, 0x40, 0x0, 0x0, 0x30 },
- { 0xd00, 0x8, 0x0, 0x0, 0x8 },
- { 0x2b30, 0x80, 0x0, 0x0, 0x38 },
- { 0xa808, 0x0, 0x0, 0x0, 0xf0 },
- { 0xa8f8, 0x8, 0x0, 0x0, 0x8 },
- { 0x80, 0x8, 0x0, 0x0, 0x8 },
- { 0xac0, 0x8, 0x0, 0x0, 0x8 },
- { 0x2580, 0x8, 0x0, 0x0, 0x8 },
- { 0x2500, 0x8, 0x0, 0x0, 0x8 },
- { 0x440, 0x8, 0x0, 0x0, 0x2 },
- { 0x1800, 0x8, 0x0, 0x0, 0x2 },
- { 0x1a00, 0x10, 0x8, 0x0, 0x2 },
- { 0x640, 0x10, 0x8, 0x0, 0x2 },
- { 0xd9b8, 0x38, 0x0, 0x0, 0x24 },
- { 0x11048, 0x10, 0x0, 0x0, 0x8 },
- { 0x11678, 0x38, 0x0, 0x0, 0x18 },
- { 0xaec0, 0x30, 0x0, 0x0, 0x10 },
- { 0x8700, 0x28, 0x0, 0x0, 0x18 },
- { 0xec00, 0x10, 0x0, 0x0, 0x10 },
- { 0xde38, 0x40, 0x0, 0x0, 0x30 },
- { 0x121a8, 0x38, 0x0, 0x0, 0x8 },
- { 0xf068, 0x20, 0x0, 0x0, 0x20 },
- { 0x2b68, 0x80, 0x0, 0x0, 0x10 },
- { 0x4ab8, 0x10, 0x0, 0x0, 0x10 },
+/**
+ * @brief qed_set_geneve_enable - enable or disable GENEVE tunnel in HW
+ *
+ * @param p_ptt - ptt window used for writing the registers.
+ * @param eth_geneve_enable - eth GENEVE enable flag.
+ * @param ip_geneve_enable - IP GENEVE enable flag.
+ */
+void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool eth_geneve_enable, bool ip_geneve_enable);
+
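/* A hedged configuration sketch: program the tunnel UDP ports first,
 * then flip the per-protocol enables. The function name is illustrative;
 * the port numbers are the IANA defaults (VXLAN 4789, GENEVE 6081) and
 * serve only as example values.
 */
static void ex_enable_tunnels(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	qed_set_vxlan_dest_port(p_hwfn, p_ptt, 4789);
	qed_set_geneve_dest_port(p_hwfn, p_ptt, 6081);

	qed_set_vxlan_enable(p_hwfn, p_ptt, true);
	qed_set_gre_enable(p_hwfn, p_ptt, true, true);
	qed_set_geneve_enable(p_hwfn, p_ptt, true, true);
}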
+#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
+#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
+#define TSTORM_PORT_STAT_OFFSET(port_id) \
+ (IRO[1].base + ((port_id) * IRO[1].m1))
+#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
+#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
+ (IRO[3].base + ((vf_id) * IRO[3].m1))
+#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size)
+#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) \
+ (IRO[4].base + (pf_id) * IRO[4].m1)
+#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size)
+#define USTORM_EQE_CONS_OFFSET(pf_id) \
+ (IRO[5].base + ((pf_id) * IRO[5].m1))
+#define USTORM_EQE_CONS_SIZE (IRO[5].size)
+#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) \
+ (IRO[6].base + ((queue_zone_id) * IRO[6].m1))
+#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[6].size)
+#define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) \
+ (IRO[7].base + ((queue_zone_id) * IRO[7].m1))
+#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size)
+#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[18].base + ((stat_counter_id) * IRO[18].m1))
+#define MSTORM_QUEUE_STAT_SIZE (IRO[18].size)
+#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \
+ (IRO[19].base + ((queue_id) * IRO[19].m1))
+#define MSTORM_ETH_PF_PRODS_SIZE (IRO[19].size)
+#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[20].base)
+#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[20].size)
+#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
+ (IRO[21].base + ((pf_id) * IRO[21].m1))
+#define MSTORM_ETH_PF_STAT_SIZE (IRO[21].size)
+#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[22].base + ((stat_counter_id) * IRO[22].m1))
+#define USTORM_QUEUE_STAT_SIZE (IRO[22].size)
+#define USTORM_ETH_PF_STAT_OFFSET(pf_id) \
+ (IRO[23].base + ((pf_id) * IRO[23].m1))
+#define USTORM_ETH_PF_STAT_SIZE (IRO[23].size)
+#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[24].base + ((stat_counter_id) * IRO[24].m1))
+#define PSTORM_QUEUE_STAT_SIZE (IRO[24].size)
+#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \
+ (IRO[25].base + ((pf_id) * IRO[25].m1))
+#define PSTORM_ETH_PF_STAT_SIZE (IRO[25].size)
+#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(ethtype) \
+ (IRO[26].base + ((ethtype) * IRO[26].m1))
+#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[26].size)
+#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[27].base)
+#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[27].size)
+#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
+ (IRO[28].base + ((pf_id) * IRO[28].m1))
+#define ETH_RX_RATE_LIMIT_SIZE (IRO[28].size)
+#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
+ (IRO[29].base + ((queue_id) * IRO[29].m1))
+#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[29].size)
+
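/* How the *_OFFSET macros above resolve (a worked example): each expands
 * to IRO[n].base + (id * IRO[n].m1), with m2/m3 covering additional array
 * dimensions, so
 *
 *	u32 addr = MSTORM_QUEUE_STAT_OFFSET(stat_counter_id);
 *
 * reads entry 18 of iro_arr below and evaluates to
 * 0x4c18 + stat_counter_id * 0x80.
 */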
+static const struct iro iro_arr[46] = {
+ {0x0, 0x0, 0x0, 0x0, 0x8},
+ {0x4cb0, 0x78, 0x0, 0x0, 0x78},
+ {0x6318, 0x20, 0x0, 0x0, 0x20},
+ {0xb00, 0x8, 0x0, 0x0, 0x4},
+ {0xa80, 0x8, 0x0, 0x0, 0x4},
+ {0x0, 0x8, 0x0, 0x0, 0x2},
+ {0x80, 0x8, 0x0, 0x0, 0x4},
+ {0x84, 0x8, 0x0, 0x0, 0x2},
+ {0x4bc0, 0x0, 0x0, 0x0, 0x78},
+ {0x3df0, 0x0, 0x0, 0x0, 0x78},
+ {0x29b0, 0x0, 0x0, 0x0, 0x78},
+ {0x4c38, 0x0, 0x0, 0x0, 0x78},
+ {0x4a48, 0x0, 0x0, 0x0, 0x78},
+ {0x7e48, 0x0, 0x0, 0x0, 0x78},
+ {0xa28, 0x8, 0x0, 0x0, 0x8},
+ {0x60f8, 0x10, 0x0, 0x0, 0x10},
+ {0xb820, 0x30, 0x0, 0x0, 0x30},
+ {0x95b8, 0x30, 0x0, 0x0, 0x30},
+ {0x4c18, 0x80, 0x0, 0x0, 0x40},
+ {0x1f8, 0x4, 0x0, 0x0, 0x4},
+ {0xc9a8, 0x0, 0x0, 0x0, 0x4},
+ {0x4c58, 0x80, 0x0, 0x0, 0x20},
+ {0x8050, 0x40, 0x0, 0x0, 0x30},
+ {0xe770, 0x60, 0x0, 0x0, 0x60},
+ {0x2b48, 0x80, 0x0, 0x0, 0x38},
+ {0xdf88, 0x78, 0x0, 0x0, 0x78},
+ {0x1f8, 0x4, 0x0, 0x0, 0x4},
+ {0xacf0, 0x0, 0x0, 0x0, 0xf0},
+ {0xade0, 0x8, 0x0, 0x0, 0x8},
+ {0x1f8, 0x8, 0x0, 0x0, 0x8},
+ {0xac0, 0x8, 0x0, 0x0, 0x8},
+ {0x2578, 0x8, 0x0, 0x0, 0x8},
+ {0x24f8, 0x8, 0x0, 0x0, 0x8},
+ {0x0, 0x8, 0x0, 0x0, 0x8},
+ {0x200, 0x10, 0x8, 0x0, 0x8},
+ {0xb78, 0x10, 0x8, 0x0, 0x2},
+ {0xd888, 0x38, 0x0, 0x0, 0x24},
+ {0x12120, 0x10, 0x0, 0x0, 0x8},
+ {0x11b20, 0x38, 0x0, 0x0, 0x18},
+ {0xa8c0, 0x30, 0x0, 0x0, 0x10},
+ {0x86f8, 0x28, 0x0, 0x0, 0x18},
+ {0xeff8, 0x10, 0x0, 0x0, 0x10},
+ {0xdd08, 0x48, 0x0, 0x0, 0x38},
+ {0xf460, 0x20, 0x0, 0x0, 0x20},
+ {0x2b80, 0x80, 0x0, 0x0, 0x10},
+ {0x5000, 0x10, 0x0, 0x0, 0x10},
};
/* Runtime array offsets */
-#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
-#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
-#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
-#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
-#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
-#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
-#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
-#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
-#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
-#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
-#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
-#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
-#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
-#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
-#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
-#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
-#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
-#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17
-#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18
-#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19
-#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20
-#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21
-#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22
-#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23
-#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
-#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
-#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
-#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
-#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497
-#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
-#define CAU_REG_PI_MEMORY_RT_OFFSET 2233
-#define CAU_REG_PI_MEMORY_RT_SIZE 4416
-#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649
-#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650
-#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651
-#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652
-#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653
-#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654
-#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655
-#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656
-#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657
-#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658
-#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659
-#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660
-#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661
-#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662
-#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663
-#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664
-#define SRC_REG_FIRSTFREE_RT_OFFSET 6665
-#define SRC_REG_FIRSTFREE_RT_SIZE 2
-#define SRC_REG_LASTFREE_RT_OFFSET 6667
-#define SRC_REG_LASTFREE_RT_SIZE 2
-#define SRC_REG_COUNTFREE_RT_OFFSET 6669
-#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670
-#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671
-#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672
-#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673
-#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674
-#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675
-#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6676
-#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6677
-#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6678
-#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6679
-#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6680
-#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6681
-#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6682
-#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6683
-#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6684
-#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6685
-#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6686
-#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6687
-#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6688
-#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689
-#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690
-#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6691
-#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6692
-#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6693
-#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6694
-#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6695
-#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6696
-#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6697
-#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6698
-#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6699
-#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6700
-#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6701
-#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6702
-#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6703
-#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
-#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28703
-#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28704
-#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28705
-#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28706
-#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28707
-#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28708
-#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28709
-#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28710
-#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28711
-#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28712
-#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28713
-#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
-#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29129
-#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512
-#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29641
-#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29642
-#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29643
-#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29644
-#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29645
-#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29646
-#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29647
-#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29648
-#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29649
-#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29650
-#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29651
-#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29652
-#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29653
-#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29654
-#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29655
-#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29656
-#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29657
-#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29658
-#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29659
-#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29660
-#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29661
-#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29662
-#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29663
-#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29664
-#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29665
-#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29666
-#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29667
-#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29668
-#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29669
-#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29670
-#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29671
-#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29672
-#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29673
-#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29674
-#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29675
-#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29676
-#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29677
-#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29678
-#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29679
-#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29680
-#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29681
-#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29682
-#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29683
-#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29684
-#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29685
-#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29686
-#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29687
-#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29688
-#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29689
-#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29690
-#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29691
-#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29692
-#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29693
-#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29694
-#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29695
-#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29696
-#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29697
-#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29698
-#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29699
-#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29700
-#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29701
-#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29702
-#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29703
-#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29704
-#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29705
-#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29706
-#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29707
-#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29708
-#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
-#define QM_REG_VOQCRDLINE_RT_OFFSET 29836
-#define QM_REG_VOQCRDLINE_RT_SIZE 20
-#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29856
-#define QM_REG_VOQINITCRDLINE_RT_SIZE 20
-#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29876
-#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29877
-#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29878
-#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29879
-#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29880
-#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29881
-#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29882
-#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29883
-#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29884
-#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29885
-#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29886
-#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29887
-#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29888
-#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29889
-#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29890
-#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29891
-#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29892
-#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29893
-#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29894
-#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29895
-#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29896
-#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29897
-#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29898
-#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29899
-#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29900
-#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29901
-#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29902
-#define QM_REG_PQTX2PF_0_RT_OFFSET 29903
-#define QM_REG_PQTX2PF_1_RT_OFFSET 29904
-#define QM_REG_PQTX2PF_2_RT_OFFSET 29905
-#define QM_REG_PQTX2PF_3_RT_OFFSET 29906
-#define QM_REG_PQTX2PF_4_RT_OFFSET 29907
-#define QM_REG_PQTX2PF_5_RT_OFFSET 29908
-#define QM_REG_PQTX2PF_6_RT_OFFSET 29909
-#define QM_REG_PQTX2PF_7_RT_OFFSET 29910
-#define QM_REG_PQTX2PF_8_RT_OFFSET 29911
-#define QM_REG_PQTX2PF_9_RT_OFFSET 29912
-#define QM_REG_PQTX2PF_10_RT_OFFSET 29913
-#define QM_REG_PQTX2PF_11_RT_OFFSET 29914
-#define QM_REG_PQTX2PF_12_RT_OFFSET 29915
-#define QM_REG_PQTX2PF_13_RT_OFFSET 29916
-#define QM_REG_PQTX2PF_14_RT_OFFSET 29917
-#define QM_REG_PQTX2PF_15_RT_OFFSET 29918
-#define QM_REG_PQTX2PF_16_RT_OFFSET 29919
-#define QM_REG_PQTX2PF_17_RT_OFFSET 29920
-#define QM_REG_PQTX2PF_18_RT_OFFSET 29921
-#define QM_REG_PQTX2PF_19_RT_OFFSET 29922
-#define QM_REG_PQTX2PF_20_RT_OFFSET 29923
-#define QM_REG_PQTX2PF_21_RT_OFFSET 29924
-#define QM_REG_PQTX2PF_22_RT_OFFSET 29925
-#define QM_REG_PQTX2PF_23_RT_OFFSET 29926
-#define QM_REG_PQTX2PF_24_RT_OFFSET 29927
-#define QM_REG_PQTX2PF_25_RT_OFFSET 29928
-#define QM_REG_PQTX2PF_26_RT_OFFSET 29929
-#define QM_REG_PQTX2PF_27_RT_OFFSET 29930
-#define QM_REG_PQTX2PF_28_RT_OFFSET 29931
-#define QM_REG_PQTX2PF_29_RT_OFFSET 29932
-#define QM_REG_PQTX2PF_30_RT_OFFSET 29933
-#define QM_REG_PQTX2PF_31_RT_OFFSET 29934
-#define QM_REG_PQTX2PF_32_RT_OFFSET 29935
-#define QM_REG_PQTX2PF_33_RT_OFFSET 29936
-#define QM_REG_PQTX2PF_34_RT_OFFSET 29937
-#define QM_REG_PQTX2PF_35_RT_OFFSET 29938
-#define QM_REG_PQTX2PF_36_RT_OFFSET 29939
-#define QM_REG_PQTX2PF_37_RT_OFFSET 29940
-#define QM_REG_PQTX2PF_38_RT_OFFSET 29941
-#define QM_REG_PQTX2PF_39_RT_OFFSET 29942
-#define QM_REG_PQTX2PF_40_RT_OFFSET 29943
-#define QM_REG_PQTX2PF_41_RT_OFFSET 29944
-#define QM_REG_PQTX2PF_42_RT_OFFSET 29945
-#define QM_REG_PQTX2PF_43_RT_OFFSET 29946
-#define QM_REG_PQTX2PF_44_RT_OFFSET 29947
-#define QM_REG_PQTX2PF_45_RT_OFFSET 29948
-#define QM_REG_PQTX2PF_46_RT_OFFSET 29949
-#define QM_REG_PQTX2PF_47_RT_OFFSET 29950
-#define QM_REG_PQTX2PF_48_RT_OFFSET 29951
-#define QM_REG_PQTX2PF_49_RT_OFFSET 29952
-#define QM_REG_PQTX2PF_50_RT_OFFSET 29953
-#define QM_REG_PQTX2PF_51_RT_OFFSET 29954
-#define QM_REG_PQTX2PF_52_RT_OFFSET 29955
-#define QM_REG_PQTX2PF_53_RT_OFFSET 29956
-#define QM_REG_PQTX2PF_54_RT_OFFSET 29957
-#define QM_REG_PQTX2PF_55_RT_OFFSET 29958
-#define QM_REG_PQTX2PF_56_RT_OFFSET 29959
-#define QM_REG_PQTX2PF_57_RT_OFFSET 29960
-#define QM_REG_PQTX2PF_58_RT_OFFSET 29961
-#define QM_REG_PQTX2PF_59_RT_OFFSET 29962
-#define QM_REG_PQTX2PF_60_RT_OFFSET 29963
-#define QM_REG_PQTX2PF_61_RT_OFFSET 29964
-#define QM_REG_PQTX2PF_62_RT_OFFSET 29965
-#define QM_REG_PQTX2PF_63_RT_OFFSET 29966
-#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29967
-#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29968
-#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29969
-#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29970
-#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29971
-#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29972
-#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29973
-#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29974
-#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29975
-#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29976
-#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29977
-#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29978
-#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29979
-#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29980
-#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29981
-#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29982
-#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29983
-#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29984
-#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29985
-#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29986
-#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29987
-#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29988
-#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29989
-#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29990
-#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29991
-#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29992
-#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29993
-#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29994
-#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29995
-#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
-#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30251
-#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
-#define QM_REG_RLGLBLCRD_RT_OFFSET 30507
-#define QM_REG_RLGLBLCRD_RT_SIZE 256
-#define QM_REG_RLGLBLENABLE_RT_OFFSET 30763
-#define QM_REG_RLPFPERIOD_RT_OFFSET 30764
-#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30765
-#define QM_REG_RLPFINCVAL_RT_OFFSET 30766
-#define QM_REG_RLPFINCVAL_RT_SIZE 16
-#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30782
-#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_RLPFCRD_RT_OFFSET 30798
-#define QM_REG_RLPFCRD_RT_SIZE 16
-#define QM_REG_RLPFENABLE_RT_OFFSET 30814
-#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30815
-#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30816
-#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
-#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30832
-#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
-#define QM_REG_WFQPFCRD_RT_OFFSET 30848
-#define QM_REG_WFQPFCRD_RT_SIZE 160
-#define QM_REG_WFQPFENABLE_RT_OFFSET 31008
-#define QM_REG_WFQVPENABLE_RT_OFFSET 31009
-#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31010
-#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
-#define QM_REG_TXPQMAP_RT_OFFSET 31522
-#define QM_REG_TXPQMAP_RT_SIZE 512
-#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32034
-#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
-#define QM_REG_WFQVPCRD_RT_OFFSET 32546
-#define QM_REG_WFQVPCRD_RT_SIZE 512
-#define QM_REG_WFQVPMAP_RT_OFFSET 33058
-#define QM_REG_WFQVPMAP_RT_SIZE 512
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33570
-#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
-#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33730
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33731
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33732
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33733
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33734
-#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33735
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33736
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33737
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33741
-#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33745
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33749
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33750
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33782
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33798
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33814
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33830
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33846
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33847
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33848
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33849
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33850
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33851
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33852
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33853
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33854
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33855
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33856
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33857
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33858
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33859
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33860
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33861
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33862
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33863
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33864
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33865
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33866
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33867
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33868
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33869
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33870
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33871
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33872
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33873
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33874
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33875
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33876
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33877
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33878
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33879
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33880
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33881
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33882
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33883
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33884
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33885
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33886
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33887
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33888
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33889
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33890
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33891
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33892
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33893
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33894
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33895
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33896
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33897
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33898
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33899
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33900
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33901
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33902
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33903
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33904
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33905
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33906
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33907
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33908
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33909
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33910
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33911
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33912
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33913
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33914
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33915
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33916
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33917
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33918
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33919
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33920
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33921
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33922
-
-#define RUNTIME_ARRAY_SIZE 33923
+#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
+#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1
+#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2
+#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3
+#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4
+#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5
+#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6
+#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7
+#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8
+#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9
+#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10
+#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11
+#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12
+#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13
+#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14
+#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15
+#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16
+#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17
+#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18
+#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19
+#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20
+#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21
+#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22
+#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23
+#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24
+#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761
+#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736
+#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497
+#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736
+#define CAU_REG_PI_MEMORY_RT_OFFSET 2233
+#define CAU_REG_PI_MEMORY_RT_SIZE 4416
+#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649
+#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650
+#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651
+#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652
+#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653
+#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654
+#define PRS_REG_SEARCH_FCOE_RT_OFFSET 6655
+#define PRS_REG_SEARCH_ROCE_RT_OFFSET 6656
+#define PRS_REG_ROCE_DEST_QP_MAX_VF_RT_OFFSET 6657
+#define PRS_REG_ROCE_DEST_QP_MAX_PF_RT_OFFSET 6658
+#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659
+#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660
+#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661
+#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662
+#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663
+#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664
+#define SRC_REG_FIRSTFREE_RT_OFFSET 6665
+#define SRC_REG_FIRSTFREE_RT_SIZE 2
+#define SRC_REG_LASTFREE_RT_OFFSET 6667
+#define SRC_REG_LASTFREE_RT_SIZE 2
+#define SRC_REG_COUNTFREE_RT_OFFSET 6669
+#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670
+#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671
+#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672
+#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673
+#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674
+#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675
+#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6676
+#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6677
+#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6678
+#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6679
+#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6680
+#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6681
+#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6682
+#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6683
+#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6684
+#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6685
+#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6686
+#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6687
+#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6688
+#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689
+#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690
+#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6691
+#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 6692
+#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6693
+#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6694
+#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6695
+#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6696
+#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697
+#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698
+#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699
+#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6700
+#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6701
+#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6702
+#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6703
+#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6704
+#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000
+#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28704
+#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28705
+#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28706
+#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28707
+#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28708
+#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28709
+#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28710
+#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28711
+#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28712
+#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28713
+#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28714
+#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416
+#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29130
+#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512
+#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29642
+#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29643
+#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29644
+#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29645
+#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29646
+#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29647
+#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29648
+#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29649
+#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29650
+#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29651
+#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29652
+#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29653
+#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29654
+#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29655
+#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29656
+#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29657
+#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29658
+#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29659
+#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29660
+#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29661
+#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29662
+#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29663
+#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29664
+#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29665
+#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29666
+#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29667
+#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29668
+#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29669
+#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29670
+#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29671
+#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29672
+#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29673
+#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29674
+#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29675
+#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29676
+#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29677
+#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29678
+#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29679
+#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29680
+#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29681
+#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29682
+#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29683
+#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29684
+#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29685
+#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29686
+#define QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29687
+#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29688
+#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29689
+#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29690
+#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29691
+#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29692
+#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29693
+#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29694
+#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29695
+#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29696
+#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29697
+#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29698
+#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29699
+#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29700
+#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29701
+#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29702
+#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29703
+#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29704
+#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29705
+#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29706
+#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29707
+#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29708
+#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29709
+#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128
+#define QM_REG_VOQCRDLINE_RT_OFFSET 29837
+#define QM_REG_VOQCRDLINE_RT_SIZE 20
+#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29857
+#define QM_REG_VOQINITCRDLINE_RT_SIZE 20
+#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29877
+#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29878
+#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29879
+#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29880
+#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29881
+#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29882
+#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29883
+#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29884
+#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29885
+#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29886
+#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29887
+#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29888
+#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29889
+#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29890
+#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29891
+#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29892
+#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29893
+#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29894
+#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29895
+#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29896
+#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29897
+#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29898
+#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29899
+#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29900
+#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29901
+#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29902
+#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29903
+#define QM_REG_PQTX2PF_0_RT_OFFSET 29904
+#define QM_REG_PQTX2PF_1_RT_OFFSET 29905
+#define QM_REG_PQTX2PF_2_RT_OFFSET 29906
+#define QM_REG_PQTX2PF_3_RT_OFFSET 29907
+#define QM_REG_PQTX2PF_4_RT_OFFSET 29908
+#define QM_REG_PQTX2PF_5_RT_OFFSET 29909
+#define QM_REG_PQTX2PF_6_RT_OFFSET 29910
+#define QM_REG_PQTX2PF_7_RT_OFFSET 29911
+#define QM_REG_PQTX2PF_8_RT_OFFSET 29912
+#define QM_REG_PQTX2PF_9_RT_OFFSET 29913
+#define QM_REG_PQTX2PF_10_RT_OFFSET 29914
+#define QM_REG_PQTX2PF_11_RT_OFFSET 29915
+#define QM_REG_PQTX2PF_12_RT_OFFSET 29916
+#define QM_REG_PQTX2PF_13_RT_OFFSET 29917
+#define QM_REG_PQTX2PF_14_RT_OFFSET 29918
+#define QM_REG_PQTX2PF_15_RT_OFFSET 29919
+#define QM_REG_PQTX2PF_16_RT_OFFSET 29920
+#define QM_REG_PQTX2PF_17_RT_OFFSET 29921
+#define QM_REG_PQTX2PF_18_RT_OFFSET 29922
+#define QM_REG_PQTX2PF_19_RT_OFFSET 29923
+#define QM_REG_PQTX2PF_20_RT_OFFSET 29924
+#define QM_REG_PQTX2PF_21_RT_OFFSET 29925
+#define QM_REG_PQTX2PF_22_RT_OFFSET 29926
+#define QM_REG_PQTX2PF_23_RT_OFFSET 29927
+#define QM_REG_PQTX2PF_24_RT_OFFSET 29928
+#define QM_REG_PQTX2PF_25_RT_OFFSET 29929
+#define QM_REG_PQTX2PF_26_RT_OFFSET 29930
+#define QM_REG_PQTX2PF_27_RT_OFFSET 29931
+#define QM_REG_PQTX2PF_28_RT_OFFSET 29932
+#define QM_REG_PQTX2PF_29_RT_OFFSET 29933
+#define QM_REG_PQTX2PF_30_RT_OFFSET 29934
+#define QM_REG_PQTX2PF_31_RT_OFFSET 29935
+#define QM_REG_PQTX2PF_32_RT_OFFSET 29936
+#define QM_REG_PQTX2PF_33_RT_OFFSET 29937
+#define QM_REG_PQTX2PF_34_RT_OFFSET 29938
+#define QM_REG_PQTX2PF_35_RT_OFFSET 29939
+#define QM_REG_PQTX2PF_36_RT_OFFSET 29940
+#define QM_REG_PQTX2PF_37_RT_OFFSET 29941
+#define QM_REG_PQTX2PF_38_RT_OFFSET 29942
+#define QM_REG_PQTX2PF_39_RT_OFFSET 29943
+#define QM_REG_PQTX2PF_40_RT_OFFSET 29944
+#define QM_REG_PQTX2PF_41_RT_OFFSET 29945
+#define QM_REG_PQTX2PF_42_RT_OFFSET 29946
+#define QM_REG_PQTX2PF_43_RT_OFFSET 29947
+#define QM_REG_PQTX2PF_44_RT_OFFSET 29948
+#define QM_REG_PQTX2PF_45_RT_OFFSET 29949
+#define QM_REG_PQTX2PF_46_RT_OFFSET 29950
+#define QM_REG_PQTX2PF_47_RT_OFFSET 29951
+#define QM_REG_PQTX2PF_48_RT_OFFSET 29952
+#define QM_REG_PQTX2PF_49_RT_OFFSET 29953
+#define QM_REG_PQTX2PF_50_RT_OFFSET 29954
+#define QM_REG_PQTX2PF_51_RT_OFFSET 29955
+#define QM_REG_PQTX2PF_52_RT_OFFSET 29956
+#define QM_REG_PQTX2PF_53_RT_OFFSET 29957
+#define QM_REG_PQTX2PF_54_RT_OFFSET 29958
+#define QM_REG_PQTX2PF_55_RT_OFFSET 29959
+#define QM_REG_PQTX2PF_56_RT_OFFSET 29960
+#define QM_REG_PQTX2PF_57_RT_OFFSET 29961
+#define QM_REG_PQTX2PF_58_RT_OFFSET 29962
+#define QM_REG_PQTX2PF_59_RT_OFFSET 29963
+#define QM_REG_PQTX2PF_60_RT_OFFSET 29964
+#define QM_REG_PQTX2PF_61_RT_OFFSET 29965
+#define QM_REG_PQTX2PF_62_RT_OFFSET 29966
+#define QM_REG_PQTX2PF_63_RT_OFFSET 29967
+#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29968
+#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29969
+#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29970
+#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29971
+#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29972
+#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29973
+#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29974
+#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29975
+#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29976
+#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29977
+#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29978
+#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29979
+#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29980
+#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29981
+#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29982
+#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29983
+#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29984
+#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29985
+#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29986
+#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29987
+#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29988
+#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29989
+#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29990
+#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29991
+#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29992
+#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29993
+#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29994
+#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29995
+#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29996
+#define QM_REG_RLGLBLINCVAL_RT_SIZE 256
+#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30252
+#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256
+#define QM_REG_RLGLBLCRD_RT_OFFSET 30508
+#define QM_REG_RLGLBLCRD_RT_SIZE 256
+#define QM_REG_RLGLBLENABLE_RT_OFFSET 30764
+#define QM_REG_RLPFPERIOD_RT_OFFSET 30765
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30766
+#define QM_REG_RLPFINCVAL_RT_OFFSET 30767
+#define QM_REG_RLPFINCVAL_RT_SIZE 16
+#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30783
+#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_RLPFCRD_RT_OFFSET 30799
+#define QM_REG_RLPFCRD_RT_SIZE 16
+#define QM_REG_RLPFENABLE_RT_OFFSET 30815
+#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30816
+#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30817
+#define QM_REG_WFQPFWEIGHT_RT_SIZE 16
+#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30833
+#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16
+#define QM_REG_WFQPFCRD_RT_OFFSET 30849
+#define QM_REG_WFQPFCRD_RT_SIZE 160
+#define QM_REG_WFQPFENABLE_RT_OFFSET 31009
+#define QM_REG_WFQVPENABLE_RT_OFFSET 31010
+#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31011
+#define QM_REG_BASEADDRTXPQ_RT_SIZE 512
+#define QM_REG_TXPQMAP_RT_OFFSET 31523
+#define QM_REG_TXPQMAP_RT_SIZE 512
+#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32035
+#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
+#define QM_REG_WFQVPCRD_RT_OFFSET 32547
+#define QM_REG_WFQVPCRD_RT_SIZE 512
+#define QM_REG_WFQVPMAP_RT_OFFSET 33059
+#define QM_REG_WFQVPMAP_RT_SIZE 512
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33571
+#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33731
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33732
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33733
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33734
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33735
+#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33736
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33737
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33738
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33742
+#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33746
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
+#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33750
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33751
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33783
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33799
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33815
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33831
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33847
+#define NIG_REG_ROCE_DUPLICATE_TO_HOST_RT_OFFSET 33848
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33849
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33850
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33851
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33852
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33853
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33854
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33855
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33856
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33857
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33858
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33859
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33860
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33861
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33862
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33863
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33864
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33865
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33866
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33867
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33868
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33869
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33870
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33871
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33872
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33873
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33874
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33875
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33876
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33877
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33878
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33879
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33880
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33881
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33882
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33883
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33884
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33885
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33886
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33887
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33888
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33889
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33890
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33891
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33892
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33893
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33894
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33895
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33896
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33897
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33898
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33899
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33900
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33901
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33902
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33903
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33904
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33905
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33906
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33907
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33908
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33909
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33910
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33911
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33912
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33913
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33914
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33915
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33916
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33917
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33918
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33919
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33920
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33921
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33922
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33923
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33924
+
+#define RUNTIME_ARRAY_SIZE 33925
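
The *_RT_OFFSET constants above index one flat runtime-init array of RUNTIME_ARRAY_SIZE u32 entries; each *_RT_SIZE gives the length of a multi-entry block (e.g. QM_REG_RLGLBLINCVAL spans indices 29996..30251). A minimal sketch of that indexing follows; the array and helper names here are hypothetical illustrations, not the driver's actual API.

	/* Hypothetical model of the runtime-init array; names are
	 * illustrative only, not the qed driver's interface.
	 */
	static u32 rt_data[RUNTIME_ARRAY_SIZE];

	static void rt_store(u32 rt_offset, u32 val)
	{
		rt_data[rt_offset] = val;		/* single-entry register */
	}

	static void rt_store_block(u32 rt_offset, const u32 *vals, u32 n)
	{
		u32 i;

		for (i = 0; i < n; i++)			/* n <= the block's *_RT_SIZE */
			rt_data[rt_offset + i] = vals[i];
	}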
/* The eth storm context for the Tstorm */
struct tstorm_eth_conn_st_ctx {
@@ -2266,266 +2692,266 @@ struct xstorm_eth_conn_st_ctx {
};
struct xstorm_eth_conn_ag_ctx {
- u8 reserved0 /* cdu_validation */;
- u8 eth_state /* state */;
- u8 flags0;
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1 /* bit4 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1 /* bit6 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1 /* bit7 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7
- u8 flags1;
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1 /* bit8 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1 /* bit9 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1 /* bit10 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1 /* bit11 */
-#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_BIT12_MASK 0x1 /* bit12 */
-#define XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_BIT13_MASK 0x1 /* bit13 */
-#define XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 /* bit14 */
-#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 /* bit15 */
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+ u8 reserved0;
+ u8 eth_state;
+ u8 flags0;
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
u8 flags2;
-#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
-#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
-#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
-#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
-#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
-#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
-#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6
- u8 flags4;
-#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
-#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
-#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
-#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3 /* cf11 */
-#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3 /* cf12 */
-#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3 /* cf13 */
-#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3 /* cf14 */
-#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3 /* cf15 */
-#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 /* cf16 */
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3 /* cf18 */
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 /* cf19 */
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
u8 flags7;
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 /* cf20 */
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3 /* cf21 */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3 /* cf22 */
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
-#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
-#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
-#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
-#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
-#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
-#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
-#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1 /* cf11en */
-#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1 /* cf12en */
-#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1 /* cf13en */
-#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1 /* cf14en */
-#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1 /* cf15en */
-#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 /* cf16en */
-#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
-#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
u8 flags10;
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 /* cf18en */
-#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 /* cf19en */
-#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 /* cf20en */
-#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1 /* cf21en */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 /* cf22en */
-#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 /* cf23en */
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1 /* rule0en */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1 /* rule1en */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1 /* rule2en */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1 /* rule3en */
-#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 /* rule4en */
-#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 /* rule8en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1 /* rule9en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1 /* rule10en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1 /* rule11en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 /* rule12en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 /* rule13en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1 /* rule14en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1 /* rule15en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1 /* rule16en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1 /* rule17en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1 /* rule18en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1 /* rule19en */
-#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 /* rule20en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 /* rule21en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 /* rule22en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 /* rule23en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 /* rule24en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 /* rule25en */
-#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1 /* bit16 */
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 /* bit17 */
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 /* bit18 */
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 /* bit19 */
-#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
-#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 /* bit20 */
-#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
-#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 /* bit21 */
-#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 /* cf23 */
-#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
- u8 edpm_event_id /* byte2 */;
- __le16 physical_q0 /* physical_q0 */;
- __le16 word1 /* physical_q1 */;
- __le16 edpm_num_bds /* physical_q2 */;
- __le16 tx_bd_cons /* word3 */;
- __le16 tx_bd_prod /* word4 */;
- __le16 go_to_bd_cons /* word5 */;
- __le16 conn_dpi /* conn_dpi */;
- u8 byte3 /* byte3 */;
- u8 byte4 /* byte4 */;
- u8 byte5 /* byte5 */;
- u8 byte6 /* byte6 */;
- __le32 reg0 /* reg0 */;
- __le32 reg1 /* reg1 */;
- __le32 reg2 /* reg2 */;
- __le32 reg3 /* reg3 */;
- __le32 reg4 /* reg4 */;
- __le32 reg5 /* cf_array0 */;
- __le32 reg6 /* cf_array1 */;
- __le16 word7 /* word7 */;
- __le16 word8 /* word8 */;
- __le16 word9 /* word9 */;
- __le16 word10 /* word10 */;
- __le32 reg7 /* reg7 */;
- __le32 reg8 /* reg8 */;
- __le32 reg9 /* reg9 */;
- u8 byte7 /* byte7 */;
- u8 byte8 /* byte8 */;
- u8 byte9 /* byte9 */;
- u8 byte10 /* byte10 */;
- u8 byte11 /* byte11 */;
- u8 byte12 /* byte12 */;
- u8 byte13 /* byte13 */;
- u8 byte14 /* byte14 */;
- u8 byte15 /* byte15 */;
- u8 byte16 /* byte16 */;
- __le16 word11 /* word11 */;
- __le32 reg10 /* reg10 */;
- __le32 reg11 /* reg11 */;
- __le32 reg12 /* reg12 */;
- __le32 reg13 /* reg13 */;
- __le32 reg14 /* reg14 */;
- __le32 reg15 /* reg15 */;
- __le32 reg16 /* reg16 */;
- __le32 reg17 /* reg17 */;
- __le32 reg18 /* reg18 */;
- __le32 reg19 /* reg19 */;
- __le16 word12 /* word12 */;
- __le16 word13 /* word13 */;
- __le16 word14 /* word14 */;
- __le16 word15 /* word15 */;
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+ u8 edpm_event_id;
+ __le16 physical_q0;
+ __le16 quota;
+ __le16 edpm_num_bds;
+ __le16 tx_bd_cons;
+ __le16 tx_bd_prod;
+ __le16 tx_class;
+ __le16 conn_dpi;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le16 word7;
+ __le16 word8;
+ __le16 word9;
+ __le16 word10;
+ __le32 reg7;
+ __le32 reg8;
+ __le32 reg9;
+ u8 byte7;
+ u8 byte8;
+ u8 byte9;
+ u8 byte10;
+ u8 byte11;
+ u8 byte12;
+ u8 byte13;
+ u8 byte14;
+ u8 byte15;
+ u8 byte16;
+ __le16 word11;
+ __le32 reg10;
+ __le32 reg11;
+ __le32 reg12;
+ __le32 reg13;
+ __le32 reg14;
+ __le32 reg15;
+ __le32 reg16;
+ __le32 reg17;
+ __le32 reg18;
+ __le32 reg19;
+ __le16 word12;
+ __le16 word13;
+ __le16 word14;
+ __le16 word15;
};
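
Each flagsN byte in these aggregative-context structs is a packed bitfield described by the _MASK/_SHIFT pairs: a field FOO occupies the bits starting at FOO_SHIFT, with FOO_MASK giving its width. A sketch of the accessor pattern is below; the qed common headers provide equivalent GET_FIELD/SET_FIELD helpers, and these minimal versions are only written out for illustration.

	/* Minimal field accessors for the _MASK/_SHIFT convention above. */
	#define GET_FIELD(value, name) \
		(((value) >> (name##_SHIFT)) & (name##_MASK))
	#define SET_FIELD(value, name, flag)					\
		do {								\
			(value) &= ~((name##_MASK) << (name##_SHIFT));		\
			(value) |= ((flag) & (name##_MASK)) << (name##_SHIFT);	\
		} while (0)

	/* Example: mark the DQ completion flow active in flags1. */
	static void eth_ctx_set_dq_active(struct xstorm_eth_conn_ag_ctx *ctx)
	{
		SET_FIELD(ctx->flags1, XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
	}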
/* The eth storm context for the Ystorm */
@@ -2534,220 +2960,220 @@ struct ystorm_eth_conn_st_ctx {
};
struct ystorm_eth_conn_ag_ctx {
- u8 byte0 /* cdu_validation */;
- u8 byte1 /* state */;
- u8 flags0;
-#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf0 */
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3 /* cf1 */
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4
-#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* cf2 */
-#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+ u8 byte0;
+ u8 state;
+ u8 flags0;
+#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4
+#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 /* cf0en */
-#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1 /* cf1en */
-#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1
-#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
- u8 byte2 /* byte2 */;
- u8 byte3 /* byte3 */;
- __le16 word0 /* word0 */;
- __le32 terminate_spqe /* reg0 */;
- __le32 reg1 /* reg1 */;
- __le16 tx_bd_cons_upd /* word1 */;
- __le16 word2 /* word2 */;
- __le16 word3 /* word3 */;
- __le16 word4 /* word4 */;
- __le32 reg2 /* reg2 */;
- __le32 reg3 /* reg3 */;
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 tx_q0_int_coallecing_timeset;
+ u8 byte3;
+ __le16 word0;
+ __le32 terminate_spqe;
+ __le32 reg1;
+ __le16 tx_bd_cons_upd;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg2;
+ __le32 reg3;
};
struct tstorm_eth_conn_ag_ctx {
- u8 byte0 /* cdu_validation */;
- u8 byte1 /* state */;
- u8 flags0;
-#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 /* bit2 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 /* bit3 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
-#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 /* bit4 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 /* bit5 */
-#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 /* timer0cf */
-#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 /* timer1cf */
-#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
-#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 /* cf4 */
-#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 /* cf5 */
-#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 /* cf6 */
-#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 /* cf7 */
-#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 /* cf8 */
-#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 /* cf9 */
-#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 /* cf10 */
-#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 /* cf0en */
-#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 /* cf1en */
-#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
-#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
+#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
u8 flags4;
-#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 /* cf4en */
-#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 /* cf5en */
-#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
-#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 /* cf6en */
-#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 /* cf7en */
-#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
-#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 /* cf8en */
-#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 /* cf9en */
-#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 /* cf10en */
-#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
-#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 /* rule6en */
-#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
-#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
-#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
- __le32 reg0 /* reg0 */;
- __le32 reg1 /* reg1 */;
- __le32 reg2 /* reg2 */;
- __le32 reg3 /* reg3 */;
- __le32 reg4 /* reg4 */;
- __le32 reg5 /* reg5 */;
- __le32 reg6 /* reg6 */;
- __le32 reg7 /* reg7 */;
- __le32 reg8 /* reg8 */;
- u8 byte2 /* byte2 */;
- u8 byte3 /* byte3 */;
- __le16 rx_bd_cons /* word0 */;
- u8 byte4 /* byte4 */;
- u8 byte5 /* byte5 */;
- __le16 rx_bd_prod /* word1 */;
- __le16 word2 /* conn_dpi */;
- __le16 word3 /* word3 */;
- __le32 reg9 /* reg9 */;
- __le32 reg10 /* reg10 */;
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le32 reg7;
+ __le32 reg8;
+ u8 byte2;
+ u8 byte3;
+ __le16 rx_bd_cons;
+ u8 byte4;
+ u8 byte5;
+ __le16 rx_bd_prod;
+ __le16 word2;
+ __le16 word3;
+ __le32 reg9;
+ __le32 reg10;
};
struct ustorm_eth_conn_ag_ctx {
- u8 byte0 /* cdu_validation */;
- u8 byte1 /* state */;
- u8 flags0;
-#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 /* exist_in_qm0 */
-#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 /* exist_in_qm1 */
-#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3 /* timer0cf */
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3 /* timer1cf */
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4
-#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 /* timer2cf */
-#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 /* timer_stop_all */
-#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3 /* cf4 */
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3 /* cf5 */
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 /* cf6 */
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
u8 flags2;
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1 /* cf0en */
-#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1 /* cf1en */
-#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
-#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 /* cf2en */
-#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 /* cf3en */
-#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1 /* cf4en */
-#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1 /* cf5en */
-#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 /* cf6en */
-#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
-#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 /* rule0en */
-#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 /* rule1en */
-#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 /* rule2en */
-#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 /* rule3en */
-#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 /* rule4en */
-#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 /* rule5en */
-#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 /* rule6en */
-#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 /* rule7en */
-#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 /* rule8en */
-#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
- u8 byte2 /* byte2 */;
- u8 byte3 /* byte3 */;
- __le16 word0 /* conn_dpi */;
- __le16 tx_bd_cons /* word1 */;
- __le32 reg0 /* reg0 */;
- __le32 reg1 /* reg1 */;
- __le32 reg2 /* reg2 */;
- __le32 tx_int_coallecing_timeset /* reg3 */;
- __le16 tx_drv_bd_cons /* word2 */;
- __le16 rx_drv_cqe_cons /* word3 */;
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le16 tx_bd_cons;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 tx_int_coallecing_timeset;
+ __le16 tx_drv_bd_cons;
+ __le16 rx_drv_cqe_cons;
};
/* The eth storm context for the Ustorm */
@@ -2762,47 +3188,75 @@ struct mstorm_eth_conn_st_ctx {
/* eth connection context */
struct eth_conn_context {
- struct tstorm_eth_conn_st_ctx tstorm_st_context;
- struct regpair tstorm_st_padding[2];
- struct pstorm_eth_conn_st_ctx pstorm_st_context;
- struct xstorm_eth_conn_st_ctx xstorm_st_context;
- struct xstorm_eth_conn_ag_ctx xstorm_ag_context;
- struct ystorm_eth_conn_st_ctx ystorm_st_context;
- struct ystorm_eth_conn_ag_ctx ystorm_ag_context;
- struct tstorm_eth_conn_ag_ctx tstorm_ag_context;
- struct ustorm_eth_conn_ag_ctx ustorm_ag_context;
- struct ustorm_eth_conn_st_ctx ustorm_st_context;
- struct mstorm_eth_conn_st_ctx mstorm_st_context;
+ struct tstorm_eth_conn_st_ctx tstorm_st_context;
+ struct regpair tstorm_st_padding[2];
+ struct pstorm_eth_conn_st_ctx pstorm_st_context;
+ struct xstorm_eth_conn_st_ctx xstorm_st_context;
+ struct xstorm_eth_conn_ag_ctx xstorm_ag_context;
+ struct ystorm_eth_conn_st_ctx ystorm_st_context;
+ struct ystorm_eth_conn_ag_ctx ystorm_ag_context;
+ struct tstorm_eth_conn_ag_ctx tstorm_ag_context;
+ struct ustorm_eth_conn_ag_ctx ustorm_ag_context;
+ struct ustorm_eth_conn_st_ctx ustorm_st_context;
+ struct mstorm_eth_conn_st_ctx mstorm_st_context;
};
+/* opcodes for the event ring */
+enum eth_event_opcode {
+ ETH_EVENT_UNUSED,
+ ETH_EVENT_VPORT_START,
+ ETH_EVENT_VPORT_UPDATE,
+ ETH_EVENT_VPORT_STOP,
+ ETH_EVENT_TX_QUEUE_START,
+ ETH_EVENT_TX_QUEUE_STOP,
+ ETH_EVENT_RX_QUEUE_START,
+ ETH_EVENT_RX_QUEUE_UPDATE,
+ ETH_EVENT_RX_QUEUE_STOP,
+ ETH_EVENT_FILTERS_UPDATE,
+ ETH_EVENT_RESERVED,
+ ETH_EVENT_RESERVED2,
+ ETH_EVENT_RESERVED3,
+ ETH_EVENT_RX_ADD_UDP_FILTER,
+ ETH_EVENT_RX_DELETE_UDP_FILTER,
+ ETH_EVENT_RESERVED4,
+ ETH_EVENT_RESERVED5,
+ MAX_ETH_EVENT_OPCODE
+};
+
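
These opcodes arrive in event-ring completions for the slow-path commands defined further down; a consumer typically switches on the opcode. A hedged sketch, with a made-up handler name and made-up completion actions:

	/* Illustrative dispatch only; the handler and its actions are
	 * assumptions, not the driver's actual event-ring code.
	 */
	static int eth_handle_event(u8 opcode)
	{
		switch (opcode) {
		case ETH_EVENT_TX_QUEUE_START:
			return 0;	/* e.g. complete a pending queue start */
		case ETH_EVENT_FILTERS_UPDATE:
			return 0;	/* e.g. release the filter-update buffer */
		case ETH_EVENT_UNUSED:
		default:
			return -EINVAL;	/* unexpected opcode on the eth ring */
		}
	}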
+/* Classify rule types in E2/E3 */
enum eth_filter_action {
+ ETH_FILTER_ACTION_UNUSED,
ETH_FILTER_ACTION_REMOVE,
ETH_FILTER_ACTION_ADD,
ETH_FILTER_ACTION_REMOVE_ALL,
MAX_ETH_FILTER_ACTION
};
+/* Command for adding/removing a classification rule $$KEEP_ENDIANNESS$$ */
struct eth_filter_cmd {
- u8 type /* Filter Type (MAC/VLAN/Pair/VNI) */;
- u8 vport_id /* the vport id */;
- u8 action /* filter command action: add/remove/replace */;
- u8 reserved0;
- __le32 vni;
- __le16 mac_lsb;
- __le16 mac_mid;
- __le16 mac_msb;
- __le16 vlan_id;
-};
-
+ u8 type;
+ u8 vport_id;
+ u8 action;
+ u8 reserved0;
+ __le32 vni;
+ __le16 mac_lsb;
+ __le16 mac_mid;
+ __le16 mac_msb;
+ __le16 vlan_id;
+};
+
+/* $$KEEP_ENDIANNESS$$ */
struct eth_filter_cmd_header {
- u8 rx;
- u8 tx;
- u8 cmd_cnt;
- u8 assert_on_error;
- u8 reserved1[4];
+ u8 rx;
+ u8 tx;
+ u8 cmd_cnt;
+ u8 assert_on_error;
+ u8 reserved1[4];
};
+/* Ethernet filter types: mac/vlan/pair */
enum eth_filter_type {
+ ETH_FILTER_TYPE_UNUSED,
ETH_FILTER_TYPE_MAC,
ETH_FILTER_TYPE_VLAN,
ETH_FILTER_TYPE_PAIR,
@@ -2815,463 +3269,3515 @@ enum eth_filter_type {
MAX_ETH_FILTER_TYPE
};
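
A filter update is composed of one eth_filter_cmd_header saying whether the rules apply to the RX and/or TX path and how many eth_filter_cmd entries follow, plus the commands themselves. A hedged sketch of filling a single "add unicast MAC" rule; the helper name is invented for illustration, and the byte-to-halfword packing shown (mac[0..1] into mac_msb, and so on) follows the usual big-endian MAC layout rather than anything stated in this header.

	static void eth_fill_mac_add_cmd(struct eth_filter_cmd_header *hdr,
					 struct eth_filter_cmd *cmd,
					 u8 vport_id, const u8 *mac)
	{
		hdr->rx = 1;			/* apply on the RX path */
		hdr->tx = 1;			/* ...and on the TX path */
		hdr->cmd_cnt = 1;		/* one eth_filter_cmd follows */
		hdr->assert_on_error = 0;

		cmd->type = ETH_FILTER_TYPE_MAC;
		cmd->vport_id = vport_id;
		cmd->action = ETH_FILTER_ACTION_ADD;
		cmd->mac_msb = cpu_to_le16((mac[0] << 8) | mac[1]);
		cmd->mac_mid = cpu_to_le16((mac[2] << 8) | mac[3]);
		cmd->mac_lsb = cpu_to_le16((mac[4] << 8) | mac[5]);
	}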
+/* Ethernet Ramrod Command IDs */
enum eth_ramrod_cmd_id {
ETH_RAMROD_UNUSED,
- ETH_RAMROD_VPORT_START /* VPort Start Ramrod */,
- ETH_RAMROD_VPORT_UPDATE /* VPort Update Ramrod */,
- ETH_RAMROD_VPORT_STOP /* VPort Stop Ramrod */,
- ETH_RAMROD_RX_QUEUE_START /* RX Queue Start Ramrod */,
- ETH_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */,
- ETH_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */,
- ETH_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */,
- ETH_RAMROD_FILTERS_UPDATE /* Add or Remove Mac/Vlan/Pair filters */,
- ETH_RAMROD_RX_QUEUE_UPDATE /* RX Queue Update Ramrod */,
- ETH_RAMROD_RESERVED,
- ETH_RAMROD_RESERVED2,
- ETH_RAMROD_RESERVED3,
- ETH_RAMROD_RESERVED4,
- ETH_RAMROD_RESERVED5,
- ETH_RAMROD_RESERVED6,
- ETH_RAMROD_RESERVED7,
- ETH_RAMROD_RESERVED8,
+ ETH_RAMROD_VPORT_START,
+ ETH_RAMROD_VPORT_UPDATE,
+ ETH_RAMROD_VPORT_STOP,
+ ETH_RAMROD_RX_QUEUE_START,
+ ETH_RAMROD_RX_QUEUE_STOP,
+ ETH_RAMROD_TX_QUEUE_START,
+ ETH_RAMROD_TX_QUEUE_STOP,
+ ETH_RAMROD_FILTERS_UPDATE,
+ ETH_RAMROD_RX_QUEUE_UPDATE,
+ ETH_RAMROD_RX_CREATE_OPENFLOW_ACTION,
+ ETH_RAMROD_RX_ADD_OPENFLOW_FILTER,
+ ETH_RAMROD_RX_DELETE_OPENFLOW_FILTER,
+ ETH_RAMROD_RX_ADD_UDP_FILTER,
+ ETH_RAMROD_RX_DELETE_UDP_FILTER,
+ ETH_RAMROD_RX_CREATE_GFT_ACTION,
+ ETH_RAMROD_GFT_UPDATE_FILTER,
MAX_ETH_RAMROD_CMD_ID
};
+/* return code from eth sp ramrods */
+struct eth_return_code {
+ u8 value;
+#define ETH_RETURN_CODE_ERR_CODE_MASK 0x1F
+#define ETH_RETURN_CODE_ERR_CODE_SHIFT 0
+#define ETH_RETURN_CODE_RESERVED_MASK 0x3
+#define ETH_RETURN_CODE_RESERVED_SHIFT 5
+#define ETH_RETURN_CODE_RX_TX_MASK 0x1
+#define ETH_RETURN_CODE_RX_TX_SHIFT 7
+};
+
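The return byte packs a 5-bit error code in bits 0-4 and an RX/TX flag in bit 7; decoding it is plain shift-and-mask (which polarity of the RX/TX bit means which direction is left open here, as the header does not say):

	static inline u8 eth_return_err_code(struct eth_return_code rc)
	{
		return (rc.value >> ETH_RETURN_CODE_ERR_CODE_SHIFT) &
			ETH_RETURN_CODE_ERR_CODE_MASK;
	}

	static inline u8 eth_return_rx_tx(struct eth_return_code rc)
	{
		return (rc.value >> ETH_RETURN_CODE_RX_TX_SHIFT) &
			ETH_RETURN_CODE_RX_TX_MASK;
	}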
+/* What to do in case an error occurs */
enum eth_tx_err {
- ETH_TX_ERR_DROP /* Drop erronous packet. */,
+ ETH_TX_ERR_DROP,
ETH_TX_ERR_ASSERT_MALICIOUS,
MAX_ETH_TX_ERR
};
+/* Array of the different error type behaviors */
struct eth_tx_err_vals {
__le16 values;
-#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_MASK 0x1
-#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_SHIFT 0
-#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_MASK 0x1
-#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_SHIFT 1
-#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_MASK 0x1
-#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_SHIFT 2
-#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_MASK 0x1
-#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_SHIFT 3
-#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_MASK 0x1
-#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_SHIFT 4
-#define ETH_TX_ERR_VALS_MTU_VIOLATION_MASK 0x1
-#define ETH_TX_ERR_VALS_MTU_VIOLATION_SHIFT 5
-#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_MASK 0x1
-#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_SHIFT 6
-#define ETH_TX_ERR_VALS_RESERVED_MASK 0x1FF
-#define ETH_TX_ERR_VALS_RESERVED_SHIFT 7
-};
-
+#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_MASK 0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_SHIFT 0
+#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_MASK 0x1
+#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_SHIFT 1
+#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_MASK 0x1
+#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_SHIFT 2
+#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_MASK 0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_SHIFT 3
+#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_MASK 0x1
+#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_SHIFT 4
+#define ETH_TX_ERR_VALS_MTU_VIOLATION_MASK 0x1
+#define ETH_TX_ERR_VALS_MTU_VIOLATION_SHIFT 5
+#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_MASK 0x1
+#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_SHIFT 6
+#define ETH_TX_ERR_VALS_RESERVED_MASK 0x1FF
+#define ETH_TX_ERR_VALS_RESERVED_SHIFT 7
+};
+
+/* vport rss configuration data */
struct eth_vport_rss_config {
__le16 capabilities;
-#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_MASK 0x1
-#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_SHIFT 0
-#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_MASK 0x1
-#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_SHIFT 1
-#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_MASK 0x1
-#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_SHIFT 2
-#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_MASK 0x1
-#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_SHIFT 3
-#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_MASK 0x1
-#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_SHIFT 4
-#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_MASK 0x1
-#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_SHIFT 5
-#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_MASK 0x1
-#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_SHIFT 6
-#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK 0x1FF
-#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT 7
- u8 rss_id;
- u8 rss_mode;
- u8 update_rss_key;
- u8 update_rss_ind_table;
- u8 update_rss_capabilities;
- u8 tbl_size;
- __le32 reserved2[2];
- __le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM];
- __le32 rss_key[ETH_RSS_KEY_SIZE_REGS];
- __le32 reserved3[2];
-};
-
+#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_SHIFT 0
+#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_SHIFT 1
+#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_SHIFT 2
+#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_SHIFT 3
+#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_SHIFT 4
+#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_SHIFT 5
+#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_MASK 0x1
+#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_SHIFT 6
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK 0x1FF
+#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT 7
+ u8 rss_id;
+ u8 rss_mode;
+ u8 update_rss_key;
+ u8 update_rss_ind_table;
+ u8 update_rss_capabilities;
+ u8 tbl_size;
+ __le32 reserved2[2];
+ __le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM];
+
+ __le32 rss_key[ETH_RSS_KEY_SIZE_REGS];
+ __le32 reserved3[2];
+};
+
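(Illustrative, not part of the patch: a sketch of enabling 4-tuple RSS for TCP over IPv4/IPv6 using the capability bits above. The SET_FIELD-style helper is local to the sketch; the driver may use its own.)

	/* Local sketch of a MASK/SHIFT field setter. */
	#define SKETCH_SET_FIELD(var, name, val) \
		((var) |= (((val) & name##_MASK) << name##_SHIFT))

	static void sketch_rss_caps(struct eth_vport_rss_config *cfg)
	{
		u16 caps = 0;

		SKETCH_SET_FIELD(caps, ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY, 1);
		SKETCH_SET_FIELD(caps, ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY, 1);
		cfg->capabilities = cpu_to_le16(caps);
		cfg->rss_mode = ETH_VPORT_RSS_MODE_REGULAR;
		cfg->update_rss_capabilities = 1;
	}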
+/* eth vport RSS mode */
enum eth_vport_rss_mode {
ETH_VPORT_RSS_MODE_DISABLED,
ETH_VPORT_RSS_MODE_REGULAR,
MAX_ETH_VPORT_RSS_MODE
};
+/* Command for setting classification flags for a vport $$KEEP_ENDIANNESS$$ */
struct eth_vport_rx_mode {
__le16 state;
-#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_MASK 0x1
-#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_SHIFT 0
-#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_MASK 0x1
-#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
-#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_MASK 0x1
-#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_SHIFT 2
-#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_MASK 0x1
-#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_SHIFT 3
-#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_MASK 0x1
-#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT 4
-#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
-#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5
-#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x3FF
-#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 6
+#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_SHIFT 0
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_MASK 0x1
+#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_SHIFT 2
+#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_SHIFT 3
+#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT 4
+#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5
+#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x3FF
+#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 6
__le16 reserved2[3];
};
+/* Command for setting TPA parameters */
struct eth_vport_tpa_param {
- u8 tpa_ipv4_en_flg;
- u8 tpa_ipv6_en_flg;
- u8 tpa_ipv4_tunn_en_flg;
- u8 tpa_ipv6_tunn_en_flg;
- u8 tpa_pkt_split_flg;
- u8 tpa_hdr_data_split_flg;
- u8 tpa_gro_consistent_flg;
- u8 tpa_max_aggs_num;
- u16 tpa_max_size;
- u16 tpa_min_size_to_start;
- u16 tpa_min_size_to_cont;
- u8 max_buff_num;
- u8 reserved;
+ u8 tpa_ipv4_en_flg;
+ u8 tpa_ipv6_en_flg;
+ u8 tpa_ipv4_tunn_en_flg;
+ u8 tpa_ipv6_tunn_en_flg;
+ u8 tpa_pkt_split_flg;
+ u8 tpa_hdr_data_split_flg;
+ u8 tpa_gro_consistent_flg;
+
+ u8 tpa_max_aggs_num;
+
+ __le16 tpa_max_size;
+ __le16 tpa_min_size_to_start;
+
+ __le16 tpa_min_size_to_cont;
+ u8 max_buff_num;
+ u8 reserved;
};
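(Illustrative, not part of the patch: a sketch of enabling IPv4/IPv6 TPA with GRO-consistent aggregation. The chosen aggregation count and size thresholds are placeholders, not values mandated by the firmware interface.)

	/* Sketch only: turn on TPA for both IP versions on a vport. */
	static void sketch_tpa_enable(struct eth_vport_tpa_param *p, u16 mtu)
	{
		p->tpa_ipv4_en_flg = 1;
		p->tpa_ipv6_en_flg = 1;
		p->tpa_gro_consistent_flg = 1;
		p->tpa_max_aggs_num = 4;			/* illustrative */
		p->tpa_max_size = cpu_to_le16(0xffff);		/* illustrative */
		p->tpa_min_size_to_start = cpu_to_le16(mtu / 2);
		p->tpa_min_size_to_cont = cpu_to_le16(mtu / 2);
	}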
+/* Command for setting classification flags for a vport $$KEEP_ENDIANNESS$$ */
struct eth_vport_tx_mode {
__le16 state;
-#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_MASK 0x1
-#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_SHIFT 0
-#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_MASK 0x1
-#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
-#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_MASK 0x1
-#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_SHIFT 2
-#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_MASK 0x1
-#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_SHIFT 3
-#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
-#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT 4
-#define ETH_VPORT_TX_MODE_RESERVED1_MASK 0x7FF
-#define ETH_VPORT_TX_MODE_RESERVED1_SHIFT 5
+#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_SHIFT 0
+#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_SHIFT 1
+#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_SHIFT 2
+#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_SHIFT 3
+#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_MASK 0x1
+#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT 4
+#define ETH_VPORT_TX_MODE_RESERVED1_MASK 0x7FF
+#define ETH_VPORT_TX_MODE_RESERVED1_SHIFT 5
__le16 reserved2[3];
};
+/* Ramrod data for rx queue start ramrod */
struct rx_queue_start_ramrod_data {
- __le16 rx_queue_id;
- __le16 num_of_pbl_pages;
- __le16 bd_max_bytes;
- __le16 sb_id;
- u8 sb_index;
- u8 vport_id;
- u8 default_rss_queue_flg;
- u8 complete_cqe_flg;
- u8 complete_event_flg;
- u8 stats_counter_id;
- u8 pin_context;
- u8 pxp_tph_valid_bd;
- u8 pxp_tph_valid_pkt;
- u8 pxp_st_hint;
- __le16 pxp_st_index;
- u8 pmd_mode;
- u8 notify_en;
- u8 toggle_val;
- u8 reserved[7];
- __le16 reserved1;
- struct regpair cqe_pbl_addr;
- struct regpair bd_base;
- struct regpair reserved2;
-};
-
+ __le16 rx_queue_id;
+ __le16 num_of_pbl_pages;
+ __le16 bd_max_bytes;
+ __le16 sb_id;
+ u8 sb_index;
+ u8 vport_id;
+ u8 default_rss_queue_flg;
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 stats_counter_id;
+ u8 pin_context;
+ u8 pxp_tph_valid_bd;
+ u8 pxp_tph_valid_pkt;
+ u8 pxp_st_hint;
+
+ __le16 pxp_st_index;
+ u8 pmd_mode;
+
+ u8 notify_en;
+ u8 toggle_val;
+
+ u8 vf_rx_prod_index;
+
+ u8 reserved[6];
+ __le16 reserved1;
+ struct regpair cqe_pbl_addr;
+ struct regpair bd_base;
+ struct regpair reserved2;
+};
+
+/* Ramrod data for rx queue stop ramrod */
struct rx_queue_stop_ramrod_data {
- __le16 rx_queue_id;
- u8 complete_cqe_flg;
- u8 complete_event_flg;
- u8 vport_id;
- u8 reserved[3];
+ __le16 rx_queue_id;
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 vport_id;
+ u8 reserved[3];
};
+/* Ramrod data for rx queue update ramrod */
struct rx_queue_update_ramrod_data {
- __le16 rx_queue_id;
- u8 complete_cqe_flg;
- u8 complete_event_flg;
- u8 vport_id;
- u8 reserved[4];
- u8 reserved1;
- u8 reserved2;
- u8 reserved3;
- __le16 reserved4;
- __le16 reserved5;
+ __le16 rx_queue_id;
+ u8 complete_cqe_flg;
+ u8 complete_event_flg;
+ u8 vport_id;
+ u8 reserved[4];
+ u8 reserved1;
+ u8 reserved2;
+ u8 reserved3;
+ __le16 reserved4;
+ __le16 reserved5;
struct regpair reserved6;
};
-struct tx_queue_start_ramrod_data {
- __le16 sb_id;
- u8 sb_index;
- u8 vport_id;
- u8 reserved0;
- u8 stats_counter_id;
- __le16 qm_pq_id;
- u8 flags;
-#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_SHIFT 0
-#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT 1
-#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_SHIFT 2
-#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_SHIFT 3
-#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_SHIFT 4
-#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_MASK 0x1
-#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_SHIFT 5
-#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_MASK 0x3
-#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_SHIFT 6
- u8 pxp_st_hint;
- u8 pxp_tph_valid_bd;
- u8 pxp_tph_valid_pkt;
- __le16 pxp_st_index;
- __le16 comp_agg_size;
- __le16 queue_zone_id;
- __le16 test_dup_count;
- __le16 pbl_size;
- __le16 tx_queue_id;
- struct regpair pbl_base_addr;
- struct regpair bd_cons_address;
+/* Ramrod data for rx add UDP filter */
+struct rx_udp_filter_data {
+ __le16 action_icid;
+ __le16 vlan_id;
+ u8 ip_type;
+ u8 tenant_id_exists;
+ __le16 reserved1;
+ __le32 ip_dst_addr[4];
+ __le32 ip_src_addr[4];
+ __le16 udp_dst_port;
+ __le16 udp_src_port;
+ __le32 tenant_id;
};
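(Illustrative, not part of the patch: a sketch of filling the UDP filter data for an IPv4 flow. The ip_type encoding and the use of element 0 of the address arrays for IPv4 are assumptions; this diff does not define them.)

	/* Sketch only: populate a UDP filter for one IPv4 4-tuple. */
	static void sketch_fill_udp_filter(struct rx_udp_filter_data *f,
					   __be32 saddr, __be32 daddr,
					   __be16 sport, __be16 dport)
	{
		memset(f, 0, sizeof(*f));
		f->ip_type = 0;	/* assumed: 0 == IPv4 */
		f->ip_src_addr[0] = cpu_to_le32(be32_to_cpu(saddr));
		f->ip_dst_addr[0] = cpu_to_le32(be32_to_cpu(daddr));
		f->udp_src_port = cpu_to_le16(be16_to_cpu(sport));
		f->udp_dst_port = cpu_to_le16(be16_to_cpu(dport));
	}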
+/* Ramrod data for tx queue start ramrod */
+struct tx_queue_start_ramrod_data {
+ __le16 sb_id;
+ u8 sb_index;
+ u8 vport_id;
+ u8 reserved0;
+ u8 stats_counter_id;
+ __le16 qm_pq_id;
+ u8 flags;
+#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_SHIFT 0
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT 1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_SHIFT 2
+#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_SHIFT 3
+#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_SHIFT 4
+#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_MASK 0x1
+#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_SHIFT 5
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_MASK 0x3
+#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_SHIFT 6
+ u8 pxp_st_hint;
+ u8 pxp_tph_valid_bd;
+ u8 pxp_tph_valid_pkt;
+ __le16 pxp_st_index;
+ __le16 comp_agg_size;
+ __le16 queue_zone_id;
+ __le16 test_dup_count;
+ __le16 pbl_size;
+ __le16 tx_queue_id;
+
+ struct regpair pbl_base_addr;
+ struct regpair bd_cons_address;
+};
+
+/* Ramrod data for tx queue stop ramrod */
struct tx_queue_stop_ramrod_data {
__le16 reserved[4];
};
+/* Ramrod data for vport filter update ramrod */
struct vport_filter_update_ramrod_data {
- struct eth_filter_cmd_header filter_cmd_hdr;
- struct eth_filter_cmd filter_cmds[ETH_FILTER_RULES_COUNT];
+ struct eth_filter_cmd_header filter_cmd_hdr;
+ struct eth_filter_cmd filter_cmds[ETH_FILTER_RULES_COUNT];
};
+/* Ramrod data for vport start ramrod */
struct vport_start_ramrod_data {
- u8 vport_id;
- u8 sw_fid;
- __le16 mtu;
- u8 drop_ttl0_en;
- u8 inner_vlan_removal_en;
- struct eth_vport_rx_mode rx_mode;
- struct eth_vport_tx_mode tx_mode;
- struct eth_vport_tpa_param tpa_param;
- __le16 default_vlan;
- u8 tx_switching_en;
- u8 anti_spoofing_en;
- u8 default_vlan_en;
- u8 handle_ptp_pkts;
- u8 silent_vlan_removal_en;
- u8 untagged;
- struct eth_tx_err_vals tx_err_behav;
- u8 zero_placement_offset;
- u8 reserved[7];
-};
-
+ u8 vport_id;
+ u8 sw_fid;
+ __le16 mtu;
+ u8 drop_ttl0_en;
+ u8 inner_vlan_removal_en;
+ struct eth_vport_rx_mode rx_mode;
+ struct eth_vport_tx_mode tx_mode;
+ struct eth_vport_tpa_param tpa_param;
+ __le16 default_vlan;
+ u8 tx_switching_en;
+ u8 anti_spoofing_en;
+
+ u8 default_vlan_en;
+
+ u8 handle_ptp_pkts;
+ u8 silent_vlan_removal_en;
+ u8 untagged;
+ struct eth_tx_err_vals tx_err_behav;
+
+ u8 zero_placement_offset;
+ u8 ctl_frame_mac_check_en;
+ u8 ctl_frame_ethtype_check_en;
+ u8 reserved[5];
+};
+
+/* Ramrod data for vport stop ramrod */
struct vport_stop_ramrod_data {
- u8 vport_id;
- u8 reserved[7];
+ u8 vport_id;
+ u8 reserved[7];
};
+/* Common data for the vport update ramrod */
struct vport_update_ramrod_data_cmn {
- u8 vport_id;
- u8 update_rx_active_flg;
- u8 rx_active_flg;
- u8 update_tx_active_flg;
- u8 tx_active_flg;
- u8 update_rx_mode_flg;
- u8 update_tx_mode_flg;
- u8 update_approx_mcast_flg;
- u8 update_rss_flg;
- u8 update_inner_vlan_removal_en_flg;
- u8 inner_vlan_removal_en;
- u8 update_tpa_param_flg;
- u8 update_tpa_en_flg;
- u8 update_tx_switching_en_flg;
- u8 tx_switching_en;
- u8 update_anti_spoofing_en_flg;
- u8 anti_spoofing_en;
- u8 update_handle_ptp_pkts;
- u8 handle_ptp_pkts;
- u8 update_default_vlan_en_flg;
- u8 default_vlan_en;
- u8 update_default_vlan_flg;
- __le16 default_vlan;
- u8 update_accept_any_vlan_flg;
- u8 accept_any_vlan;
- u8 silent_vlan_removal_en;
- u8 update_mtu_flg;
- __le16 mtu;
- u8 reserved[2];
+ u8 vport_id;
+ u8 update_rx_active_flg;
+ u8 rx_active_flg;
+ u8 update_tx_active_flg;
+ u8 tx_active_flg;
+ u8 update_rx_mode_flg;
+ u8 update_tx_mode_flg;
+ u8 update_approx_mcast_flg;
+
+ u8 update_rss_flg;
+ u8 update_inner_vlan_removal_en_flg;
+
+ u8 inner_vlan_removal_en;
+ u8 update_tpa_param_flg;
+ u8 update_tpa_en_flg;
+ u8 update_tx_switching_en_flg;
+
+ u8 tx_switching_en;
+ u8 update_anti_spoofing_en_flg;
+
+ u8 anti_spoofing_en;
+ u8 update_handle_ptp_pkts;
+
+ u8 handle_ptp_pkts;
+ u8 update_default_vlan_en_flg;
+
+ u8 default_vlan_en;
+
+ u8 update_default_vlan_flg;
+
+ __le16 default_vlan;
+ u8 update_accept_any_vlan_flg;
+
+ u8 accept_any_vlan;
+ u8 silent_vlan_removal_en;
+ u8 update_mtu_flg;
+
+ __le16 mtu;
+ u8 reserved[2];
};
struct vport_update_ramrod_mcast {
__le32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
};
+/* Ramrod data for vport update ramrod */
struct vport_update_ramrod_data {
- struct vport_update_ramrod_data_cmn common;
- struct eth_vport_rx_mode rx_mode;
- struct eth_vport_tx_mode tx_mode;
- struct eth_vport_tpa_param tpa_param;
- struct vport_update_ramrod_mcast approx_mcast;
- struct eth_vport_rss_config rss_config;
+ struct vport_update_ramrod_data_cmn common;
+
+ struct eth_vport_rx_mode rx_mode;
+ struct eth_vport_tx_mode tx_mode;
+ struct eth_vport_tpa_param tpa_param;
+ struct vport_update_ramrod_mcast approx_mcast;
+ struct eth_vport_rss_config rss_config;
};
-#define VF_MAX_STATIC 192 /* In case of K2 */
+struct mstorm_rdma_task_st_ctx {
+ struct regpair temp[4];
+};
-#define MCP_GLOB_PATH_MAX 2
-#define MCP_PORT_MAX 2 /* Global */
-#define MCP_GLOB_PORT_MAX 4 /* Global */
-#define MCP_GLOB_FUNC_MAX 16 /* Global */
+struct rdma_close_func_ramrod_data {
+ u8 cnq_start_offset;
+ u8 num_cnqs;
+ u8 vf_id;
+ u8 vf_valid;
+ u8 reserved[4];
+};
+
+struct rdma_cnq_params {
+ __le16 sb_num;
+ u8 sb_index;
+ u8 num_pbl_pages;
+ __le32 reserved;
+ struct regpair pbl_base_addr;
+ __le16 queue_zone_num;
+ u8 reserved1[6];
+};
+
+struct rdma_create_cq_ramrod_data {
+ struct regpair cq_handle;
+ struct regpair pbl_addr;
+ __le32 max_cqes;
+ __le16 pbl_num_pages;
+ __le16 dpi;
+ u8 is_two_level_pbl;
+ u8 cnq_id;
+ u8 pbl_log_page_size;
+ u8 toggle_bit;
+ __le16 int_timeout;
+ __le16 reserved1;
+};
+
+struct rdma_deregister_tid_ramrod_data {
+ __le32 itid;
+ __le32 reserved;
+};
+
+struct rdma_destroy_cq_output_params {
+ __le16 cnq_num;
+ __le16 reserved0;
+ __le32 reserved1;
+};
+
+struct rdma_destroy_cq_ramrod_data {
+ struct regpair output_params_addr;
+};
+
+enum rdma_event_opcode {
+ RDMA_EVENT_UNUSED,
+ RDMA_EVENT_FUNC_INIT,
+ RDMA_EVENT_FUNC_CLOSE,
+ RDMA_EVENT_REGISTER_MR,
+ RDMA_EVENT_DEREGISTER_MR,
+ RDMA_EVENT_CREATE_CQ,
+ RDMA_EVENT_RESIZE_CQ,
+ RDMA_EVENT_DESTROY_CQ,
+ RDMA_EVENT_CREATE_SRQ,
+ RDMA_EVENT_MODIFY_SRQ,
+ RDMA_EVENT_DESTROY_SRQ,
+ MAX_RDMA_EVENT_OPCODE
+};
+
+enum rdma_fw_return_code {
+ RDMA_RETURN_OK = 0,
+ RDMA_RETURN_REGISTER_MR_BAD_STATE_ERR,
+ RDMA_RETURN_DEREGISTER_MR_BAD_STATE_ERR,
+ RDMA_RETURN_RESIZE_CQ_ERR,
+ RDMA_RETURN_NIG_DRAIN_REQ,
+ MAX_RDMA_FW_RETURN_CODE
+};
+
+struct rdma_init_func_hdr {
+ u8 cnq_start_offset;
+ u8 num_cnqs;
+ u8 cq_ring_mode;
+ u8 cnp_vlan_priority;
+ __le32 cnp_send_timeout;
+ u8 cnp_dscp;
+ u8 vf_id;
+ u8 vf_valid;
+ u8 reserved[5];
+};
+
+struct rdma_init_func_ramrod_data {
+ struct rdma_init_func_hdr params_header;
+ struct rdma_cnq_params cnq_params[NUM_OF_GLOBAL_QUEUES];
+};
+
+enum rdma_ramrod_cmd_id {
+ RDMA_RAMROD_UNUSED,
+ RDMA_RAMROD_FUNC_INIT,
+ RDMA_RAMROD_FUNC_CLOSE,
+ RDMA_RAMROD_REGISTER_MR,
+ RDMA_RAMROD_DEREGISTER_MR,
+ RDMA_RAMROD_CREATE_CQ,
+ RDMA_RAMROD_RESIZE_CQ,
+ RDMA_RAMROD_DESTROY_CQ,
+ RDMA_RAMROD_CREATE_SRQ,
+ RDMA_RAMROD_MODIFY_SRQ,
+ RDMA_RAMROD_DESTROY_SRQ,
+ MAX_RDMA_RAMROD_CMD_ID
+};
+
+struct rdma_register_tid_ramrod_data {
+ __le32 flags;
+#define RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID_MASK 0x3FFFF
+#define RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID_SHIFT 0
+#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_MASK 0x1F
+#define RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG_SHIFT 18
+#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_TWO_LEVEL_PBL_SHIFT 23
+#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_ZERO_BASED_SHIFT 24
+#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_PHY_MR_SHIFT 25
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_READ_SHIFT 26
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_WRITE_SHIFT 27
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_REMOTE_ATOMIC_SHIFT 28
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_WRITE_SHIFT 29
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_LOCAL_READ_SHIFT 30
+#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_ENABLE_MW_BIND_SHIFT 31
+ u8 flags1;
+#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_MASK 0x1F
+#define RDMA_REGISTER_TID_RAMROD_DATA_PBL_PAGE_SIZE_LOG_SHIFT 0
+#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_MASK 0x7
+#define RDMA_REGISTER_TID_RAMROD_DATA_TID_TYPE_SHIFT 5
+ u8 flags2;
+#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_DMA_MR_SHIFT 0
+#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG_SHIFT 1
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_MASK 0x3F
+#define RDMA_REGISTER_TID_RAMROD_DATA_RESERVED1_SHIFT 2
+ u8 key;
+ u8 length_hi;
+ u8 vf_id;
+ u8 vf_valid;
+ __le16 pd;
+ __le32 length_lo;
+ __le32 itid;
+ __le32 reserved2;
+ struct regpair va;
+ struct regpair pbl_base;
+ struct regpair dif_error_addr;
+ struct regpair dif_runt_addr;
+ __le32 reserved3[2];
+};
+
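(Illustrative, not part of the patch: a sketch of packing two of the register-TID fields into the 32-bit flags word using the MASK/SHIFT pairs above. The setter macro is local to the sketch.)

	/* Local sketch of a 32-bit MASK/SHIFT field setter. */
	#define SKETCH_SET_FIELD32(var, name, val) \
		((var) |= (((val) & name##_MASK) << name##_SHIFT))

	static __le32 sketch_tid_flags(u32 itid_max, u32 log_page_size)
	{
		u32 flags = 0;

		SKETCH_SET_FIELD32(flags,
				   RDMA_REGISTER_TID_RAMROD_DATA_MAX_ID,
				   itid_max);
		SKETCH_SET_FIELD32(flags,
				   RDMA_REGISTER_TID_RAMROD_DATA_PAGE_SIZE_LOG,
				   log_page_size);
		return cpu_to_le32(flags);
	}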
+struct rdma_resize_cq_output_params {
+ __le32 old_cq_cons;
+ __le32 old_cq_prod;
+};
+
+struct rdma_resize_cq_ramrod_data {
+ u8 flags;
+#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_MASK 0x1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_TOGGLE_BIT_SHIFT 0
+#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_MASK 0x1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_IS_TWO_LEVEL_PBL_SHIFT 1
+#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_MASK 0x3F
+#define RDMA_RESIZE_CQ_RAMROD_DATA_RESERVED_SHIFT 2
+ u8 pbl_log_page_size;
+ __le16 pbl_num_pages;
+ __le32 max_cqes;
+ struct regpair pbl_addr;
+ struct regpair output_params_addr;
+};
+
+struct rdma_srq_context {
+ struct regpair temp[8];
+};
+
+struct rdma_srq_create_ramrod_data {
+ struct regpair pbl_base_addr;
+ __le16 pages_in_srq_pbl;
+ __le16 pd_id;
+ struct rdma_srq_id srq_id;
+ __le16 page_size;
+ __le16 reserved1;
+ __le32 reserved2;
+ struct regpair producers_addr;
+};
+
+struct rdma_srq_destroy_ramrod_data {
+ struct rdma_srq_id srq_id;
+ __le32 reserved;
+};
+
+struct rdma_srq_modify_ramrod_data {
+ struct rdma_srq_id srq_id;
+ __le32 wqe_limit;
+};
+
+struct ystorm_rdma_task_st_ctx {
+ struct regpair temp[4];
+};
+
+struct ystorm_rdma_task_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ __le16 msem_ctx_upd_seq;
+ u8 flags0;
+#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6
+#define YSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
+ u8 flags1;
+#define YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
+#define YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
+#define YSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
+#define YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
+#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
+#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
+#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
+#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
+ u8 flags2;
+#define YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
+#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
+ u8 key;
+ __le32 mw_cnt;
+ u8 ref_cnt_seq;
+ u8 ctx_upd_seq;
+ __le16 dif_flags;
+ __le16 tx_ref_count;
+ __le16 last_used_ltid;
+ __le16 parent_mr_lo;
+ __le16 parent_mr_hi;
+ __le32 fbo_lo;
+ __le32 fbo_hi;
+};
+
+struct mstorm_rdma_task_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ __le16 icid;
+ u8 flags0;
+#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
+#define MSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
+ u8 flags1;
+#define MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
+#define MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
+#define MSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
+#define MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
+#define MSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
+#define MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 4
+#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
+#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
+ u8 flags2;
+#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 0
+#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
+ u8 key;
+ __le32 mw_cnt;
+ u8 ref_cnt_seq;
+ u8 ctx_upd_seq;
+ __le16 dif_flags;
+ __le16 tx_ref_count;
+ __le16 last_used_ltid;
+ __le16 parent_mr_lo;
+ __le16 parent_mr_hi;
+ __le32 fbo_lo;
+ __le32 fbo_hi;
+};
+
+struct ustorm_rdma_task_st_ctx {
+ struct regpair temp[2];
+};
+
+struct ustorm_rdma_task_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ __le16 icid;
+ u8 flags0;
+#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_DIF_RUNT_VALID_SHIFT 5
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6
+ u8 flags1;
+#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0
+#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT 2
+#define USTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 4
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
+ u8 flags2;
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT 1
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT 2
+#define USTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
+#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 5
+#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 6
+#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7
+ u8 flags3;
+#define USTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 0
+#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1
+#define USTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 2
+#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+ __le32 dif_err_intervals;
+ __le32 dif_error_1st_interval;
+ __le32 reg2;
+ __le32 dif_runt_value;
+ __le32 reg4;
+ __le32 reg5;
+};
+
+struct rdma_task_context {
+ struct ystorm_rdma_task_st_ctx ystorm_st_context;
+ struct ystorm_rdma_task_ag_ctx ystorm_ag_context;
+ struct tdif_task_context tdif_context;
+ struct mstorm_rdma_task_ag_ctx mstorm_ag_context;
+ struct mstorm_rdma_task_st_ctx mstorm_st_context;
+ struct rdif_task_context rdif_context;
+ struct ustorm_rdma_task_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2];
+ struct ustorm_rdma_task_ag_ctx ustorm_ag_context;
+};
+
+enum rdma_tid_type {
+ RDMA_TID_REGISTERED_MR,
+ RDMA_TID_FMR,
+ RDMA_TID_MW_TYPE1,
+ RDMA_TID_MW_TYPE2A,
+ MAX_RDMA_TID_TYPE
+};
+
+struct mstorm_rdma_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define MSTORM_RDMA_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+};
+
+struct tstorm_rdma_conn_ag_ctx {
+ u8 reserved0;
+ u8 byte1;
+ u8 flags0;
+#define TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_RDMA_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_RDMA_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_RDMA_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_RDMA_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define TSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
+#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+ u8 flags2;
+#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
+#define TSTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_RDMA_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_RDMA_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define TSTORM_RDMA_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_RDMA_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
+#define TSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_RDMA_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le32 reg7;
+ __le32 reg8;
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ u8 byte4;
+ u8 byte5;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le32 reg9;
+ __le32 reg10;
+};
+
+struct tstorm_rdma_task_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ __le16 word0;
+ u8 flags0;
+#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
+#define TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
+ u8 flags1;
+#define TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT 1
+#define TSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 6
+ u8 flags2;
+#define TSTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_CF4_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_CF5_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_CF6_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT 6
+ u8 flags3;
+#define TSTORM_RDMA_TASK_AG_CTX_CF7_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 3
+#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 5
+#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT 6
+#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT 1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 3
+#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 5
+#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 6
+#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 7
+ u8 byte2;
+ __le16 word1;
+ __le32 reg0;
+ u8 byte3;
+ u8 byte4;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg1;
+ __le32 reg2;
+};
+
+struct ustorm_rdma_conn_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ u8 flags0;
+#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define USTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 2
+#define USTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define USTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 6
+ u8 flags2;
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5
+#define USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT 7
+ u8 flags3;
+#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT 0
+#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 conn_dpi;
+ __le16 word1;
+ __le32 cq_cons;
+ __le32 cq_se_prod;
+ __le32 cq_prod;
+ __le32 reg3;
+ __le16 int_timeout;
+ __le16 word3;
+};
+
+struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT1_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT2_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT4_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT5_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT6_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT7_SHIFT 7
+ u8 flags1;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT8_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT9_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT10_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT11_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT12_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT13_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT13_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT14_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_YSTORM_FLUSH_SHIFT 7
+ u8 flags2;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_SHIFT 6
+ u8 flags4;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19_SHIFT 6
+ u8 flags7;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_FLUSH_Q0_CF_EN_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF16EN_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF17EN_SHIFT 7
+ u8 flags10;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF18EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF19EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF20EN_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF21EN_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23EN_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE0EN_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE1EN_SHIFT 7
+ u8 flags11;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE2EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE3EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE4EN_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_MIGRATION_SHIFT 0
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_BIT17_SHIFT 1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_DPM_PORT_NUM_SHIFT 2
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_RESERVED_SHIFT 4
+#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORMROCECONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_MASK 0x3
+#define XSTORMROCECONNAGCTXDQEXTLDPART_CF23_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le16 word5;
+ __le16 conn_dpi;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 snd_nxt_psn;
+ __le32 reg4;
+};
+
+struct xstorm_rdma_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT2_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT2_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_RDMA_CONN_AG_CTX_BIT4_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT4_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_BIT5_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT5_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_BIT6_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT6_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_BIT7_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT7_SHIFT 7
+ u8 flags1;
+#define XSTORM_RDMA_CONN_AG_CTX_BIT8_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT8_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_BIT9_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT9_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_RDMA_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_BIT14_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT14_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
+ u8 flags2;
+#define XSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORM_RDMA_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+ u8 flags4;
+#define XSTORM_RDMA_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_RDMA_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_RDMA_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_CF19_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF19_SHIFT 6
+ u8 flags7;
+#define XSTORM_RDMA_CONN_AG_CTX_CF20_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF20_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF21_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF21_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_RDMA_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_RDMA_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF17EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_RDMA_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_CF19EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF19EN_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_CF20EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF20EN_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_CF21EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF21EN_SHIFT 3
+#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 7
+ u8 flags11;
+#define XSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_RDMA_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_RDMA_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_RDMA_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_RDMA_CONN_AG_CTX_MIGRATION_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_MIGRATION_SHIFT 0
+#define XSTORM_RDMA_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
+#define XSTORM_RDMA_CONN_AG_CTX_RESERVED_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_RESERVED_SHIFT 4
+#define XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_RDMA_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_RDMA_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_RDMA_CONN_AG_CTX_CF23_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le16 word5;
+ __le16 conn_dpi;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 snd_nxt_psn;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+};
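+
+/* Editorial sketch, not part of the generated firmware HSI: each MASK/SHIFT
+ * pair above packs a sub-field into one of the flagsN bytes. Assuming the
+ * driver's generic GET_FIELD()/SET_FIELD() helpers (which concatenate the
+ * _MASK/_SHIFT suffixes onto the field name), raising the FLUSH_Q0
+ * completion flag and its enable bit might look like:
+ *
+ * SET_FIELD(ag->flags3, XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF, 1);
+ * SET_FIELD(ag->flags8, XSTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN, 1);
+ * if (GET_FIELD(ag->flags1, XSTORM_RDMA_CONN_AG_CTX_YSTORM_FLUSH))
+ * handle_ystorm_flush();
+ *
+ * where "ag" points at a struct xstorm_rdma_conn_ag_ctx and
+ * handle_ystorm_flush() is a hypothetical caller-side routine.
+ */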
+
+struct ystorm_rdma_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define YSTORM_RDMA_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_RDMA_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_RDMA_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_RDMA_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_RDMA_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_RDMA_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_RDMA_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_RDMA_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_RDMA_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_RDMA_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_RDMA_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_RDMA_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le32 reg0;
+ __le32 reg1;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg2;
+ __le32 reg3;
+};
+
+struct mstorm_roce_conn_st_ctx {
+ struct regpair temp[6];
+};
+
+struct pstorm_roce_conn_st_ctx {
+ struct regpair temp[16];
+};
+
+struct ystorm_roce_conn_st_ctx {
+ struct regpair temp[2];
+};
+
+struct xstorm_roce_conn_st_ctx {
+ struct regpair temp[22];
+};
+
+struct tstorm_roce_conn_st_ctx {
+ struct regpair temp[30];
+};
+
+struct ustorm_roce_conn_st_ctx {
+ struct regpair temp[12];
+};
+
+struct roce_conn_context {
+ struct ystorm_roce_conn_st_ctx ystorm_st_context;
+ struct regpair ystorm_st_padding[2];
+ struct pstorm_roce_conn_st_ctx pstorm_st_context;
+ struct xstorm_roce_conn_st_ctx xstorm_st_context;
+ struct regpair xstorm_st_padding[2];
+ struct xstorm_rdma_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_rdma_conn_ag_ctx tstorm_ag_context;
+ struct timers_context timer_context;
+ struct ustorm_rdma_conn_ag_ctx ustorm_ag_context;
+ struct tstorm_roce_conn_st_ctx tstorm_st_context;
+ struct mstorm_roce_conn_st_ctx mstorm_st_context;
+ struct ustorm_roce_conn_st_ctx ustorm_st_context;
+ struct regpair ustorm_st_padding[2];
+};
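+
+/* Editorial note, not part of the generated firmware HSI: the connection
+ * context above concatenates the per-storm (ystorm/pstorm/xstorm/tstorm/
+ * mstorm/ustorm) state and aggregation contexts into one DMA-able blob;
+ * the regpair *_st_padding arrays appear to exist only to keep each storm
+ * section aligned to the size the firmware expects.
+ */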
+
+struct roce_create_qp_req_ramrod_data {
+ __le16 flags;
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ROCE_FLAVOR_SHIFT 0
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_MASK 0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FMR_AND_RESERVED_EN_SHIFT 2
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_MASK 0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_SIGNALED_COMP_SHIFT 3
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_MASK 0x7
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_PRI_SHIFT 4
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 7
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 8
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK 0xF
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT 12
+ u8 max_ord;
+ u8 traffic_class;
+ u8 hop_limit;
+ u8 orq_num_pages;
+ __le16 p_key;
+ __le32 flow_label;
+ __le32 dst_qp_id;
+ __le32 ack_timeout_val;
+ __le32 initial_psn;
+ __le16 mtu;
+ __le16 pd;
+ __le16 sq_num_pages;
+ __le16 reserved2;
+ struct regpair sq_pbl_addr;
+ struct regpair orq_pbl_addr;
+ __le16 local_mac_addr[3];
+ __le16 remote_mac_addr[3];
+ __le16 vlan_id;
+ __le16 udp_src_port;
+ __le32 src_gid[4];
+ __le32 dst_gid[4];
+ struct regpair qp_handle_for_cqe;
+ struct regpair qp_handle_for_async;
+ u8 stats_counter_id;
+ u8 reserved3[7];
+ __le32 cq_cid;
+ __le16 physical_queue0;
+ __le16 dpi;
+};
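+
+/* Editorial sketch, not part of the generated firmware HSI: every
+ * multi-byte field in this ramrod is little-endian on the wire, so a
+ * caller is expected to convert from CPU order when filling it in.
+ * Assuming a DMA_REGPAIR_LE()-style helper for the regpair addresses and
+ * a hypothetical driver-side QP state structure "qp", a fragment might
+ * read:
+ *
+ * p_ramrod->mtu = cpu_to_le16(qp->mtu);
+ * p_ramrod->initial_psn = cpu_to_le32(qp->sq_psn);
+ * p_ramrod->sq_num_pages = cpu_to_le16(qp->sq_num_pages);
+ * DMA_REGPAIR_LE(p_ramrod->sq_pbl_addr, qp->sq_pbl_phys);
+ */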
+
+struct roce_create_qp_resp_ramrod_data {
+ __le16 flags;
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_MASK 0x3
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ROCE_FLAVOR_SHIFT 0
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT 2
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT 3
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT 4
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_SRQ_FLG_SHIFT 5
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_E2E_FLOW_CONTROL_EN_SHIFT 6
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED0_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED0_SHIFT 7
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_MASK 0x7
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_PRI_SHIFT 8
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 11
+ u8 max_ird;
+ u8 traffic_class;
+ u8 hop_limit;
+ u8 irq_num_pages;
+ __le16 p_key;
+ __le32 flow_label;
+ __le32 dst_qp_id;
+ u8 stats_counter_id;
+ u8 reserved1;
+ __le16 mtu;
+ __le32 initial_psn;
+ __le16 pd;
+ __le16 rq_num_pages;
+ struct rdma_srq_id srq_id;
+ struct regpair rq_pbl_addr;
+ struct regpair irq_pbl_addr;
+ __le16 local_mac_addr[3];
+ __le16 remote_mac_addr[3];
+ __le16 vlan_id;
+ __le16 udp_src_port;
+ __le32 src_gid[4];
+ __le32 dst_gid[4];
+ struct regpair qp_handle_for_cqe;
+ struct regpair qp_handle_for_async;
+ __le32 reserved2[2];
+ __le32 cq_cid;
+ __le16 physical_queue0;
+ __le16 dpi;
+};
+
+struct roce_destroy_qp_req_output_params {
+ __le32 num_bound_mw;
+ __le32 reserved;
+};
+
+struct roce_destroy_qp_req_ramrod_data {
+ struct regpair output_params_addr;
+};
+
+struct roce_destroy_qp_resp_output_params {
+ __le32 num_invalidated_mw;
+ __le32 reserved;
+};
+
+struct roce_destroy_qp_resp_ramrod_data {
+ struct regpair output_params_addr;
+};
+
+enum roce_event_opcode {
+ ROCE_EVENT_CREATE_QP = 11,
+ ROCE_EVENT_MODIFY_QP,
+ ROCE_EVENT_QUERY_QP,
+ ROCE_EVENT_DESTROY_QP,
+ MAX_ROCE_EVENT_OPCODE
+};
+
+struct roce_modify_qp_req_ramrod_data {
+ __le16 flags;
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT 0
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MOVE_TO_SQD_FLG_SHIFT 1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_EN_SQD_ASYNC_NOTIFY_SHIFT 2
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_P_KEY_FLG_SHIFT 3
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT 4
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_MAX_ORD_FLG_SHIFT 5
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_FLG_SHIFT 6
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_FLG_SHIFT 7
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ACK_TIMEOUT_FLG_SHIFT 8
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_FLG_SHIFT 9
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_MASK 0x7
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT 10
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK 0x7
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT 13
+ u8 fields;
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 0
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_MASK 0xF
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RNR_NAK_CNT_SHIFT 4
+ u8 max_ord;
+ u8 traffic_class;
+ u8 hop_limit;
+ __le16 p_key;
+ __le32 flow_label;
+ __le32 ack_timeout_val;
+ __le16 mtu;
+ __le16 reserved2;
+ __le32 reserved3[3];
+ __le32 src_gid[4];
+ __le32 dst_gid[4];
+};
+
+struct roce_modify_qp_resp_ramrod_data {
+ __le16 flags;
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MOVE_TO_ERR_FLG_SHIFT 0
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_RD_EN_SHIFT 1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_WR_EN_SHIFT 2
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ATOMIC_EN_SHIFT 3
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_P_KEY_FLG_SHIFT 4
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_ADDRESS_VECTOR_FLG_SHIFT 5
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MAX_IRD_FLG_SHIFT 6
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_FLG_SHIFT 7
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_FLG_SHIFT 8
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 9
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK 0x3F
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT 10
+ u8 fields;
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_MASK 0x7
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_SHIFT 0
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_MASK 0x1F
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_MIN_RNR_NAK_TIMER_SHIFT 3
+ u8 max_ird;
+ u8 traffic_class;
+ u8 hop_limit;
+ __le16 p_key;
+ __le32 flow_label;
+ __le16 mtu;
+ __le16 reserved2;
+ __le32 src_gid[4];
+ __le32 dst_gid[4];
+};
+
+struct roce_query_qp_req_output_params {
+ __le32 psn;
+ __le32 flags;
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_MASK 0x1
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_ERR_FLG_SHIFT 0
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_MASK 0x1
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_SQ_DRAINING_FLG_SHIFT 1
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_MASK 0x3FFFFFFF
+#define ROCE_QUERY_QP_REQ_OUTPUT_PARAMS_RESERVED0_SHIFT 2
+};
+
+struct roce_query_qp_req_ramrod_data {
+ struct regpair output_params_addr;
+};
+
+struct roce_query_qp_resp_output_params {
+ __le32 psn;
+ __le32 err_flag;
+#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG_MASK 0x1
+#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG_SHIFT 0
+#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_RESERVED0_MASK 0x7FFFFFFF
+#define ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_RESERVED0_SHIFT 1
+};
+
+struct roce_query_qp_resp_ramrod_data {
+ struct regpair output_params_addr;
+};
+
+enum roce_ramrod_cmd_id {
+ ROCE_RAMROD_CREATE_QP = 11,
+ ROCE_RAMROD_MODIFY_QP,
+ ROCE_RAMROD_QUERY_QP,
+ ROCE_RAMROD_DESTROY_QP,
+ MAX_ROCE_RAMROD_CMD_ID
+};
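+
+/* Editorial note, not part of the generated firmware HSI: the ramrod
+ * command IDs above start at 11 and mirror enum roce_event_opcode
+ * one-for-one, so the completion event for a given ramrod appears to
+ * carry the same numeric opcode that was used to submit it.
+ */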
+
+struct mstorm_roce_req_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+};
+
+struct mstorm_roce_resp_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+};
+
+enum roce_flavor {
+ PLAIN_ROCE, /* RoCE v1 */
+ RROCE_IPV4, /* RoCE v2 (Routable RoCE) over IPv4 */
+ RROCE_IPV6, /* RoCE v2 (Routable RoCE) over IPv6 */
+ MAX_ROCE_FLAVOR
+};
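+
+/* Editorial sketch, not part of the generated firmware HSI: the flavor
+ * selects the wire encapsulation - PLAIN_ROCE is RoCE v1, while the
+ * RROCE_* values are RoCE v2 over UDP/IPv4 or UDP/IPv6. A hypothetical
+ * helper picking a flavor from an address family might look like:
+ *
+ * static enum roce_flavor roce_flavor_from_family(int family, bool v2)
+ * {
+ * if (!v2)
+ * return PLAIN_ROCE;
+ * return family == AF_INET ? RROCE_IPV4 : RROCE_IPV6;
+ * }
+ */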
+
+struct tstorm_roce_req_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_SHIFT 1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT 6
+ u8 flags1;
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+ u8 flags2;
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_SHIFT 6
+ u8 flags3;
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT 6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_SHIFT 3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_SHIFT 5
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_SHIFT 6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_SHIFT 5
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0;
+ __le32 snd_nxt_psn;
+ __le32 snd_max_psn;
+ __le32 orq_prod;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le32 reg7;
+ __le32 reg8;
+ u8 tx_cqe_error_type;
+ u8 orq_cache_idx;
+ __le16 snd_sq_cons_th;
+ u8 byte4;
+ u8 byte5;
+ __le16 snd_sq_cons;
+ __le16 word2;
+ __le16 word3;
+ __le32 reg9;
+ __le32 reg10;
+};
+
+struct tstorm_roce_resp_conn_ag_ctx {
+ u8 byte0;
+ u8 state;
+ u8 flags0;
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+ u8 flags2;
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 5
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT 6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT 5
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 psn_and_rxmit_id_echo;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le32 reg7;
+ __le32 reg8;
+ u8 tx_async_error_type;
+ u8 byte3;
+ __le16 rq_cons;
+ u8 byte4;
+ u8 byte5;
+ __le16 rq_prod;
+ __le16 conn_dpi;
+ __le16 irq_cons;
+ __le32 num_invalidated_mw;
+ __le32 reg10;
+};
+
+struct ustorm_roce_req_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT 6
+ u8 flags2;
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le16 word2;
+ __le16 word3;
+};
+
+struct ustorm_roce_resp_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 6
+ u8 flags2;
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le16 word2;
+ __le16 word3;
+};
+
+struct xstorm_roce_req_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
+ u8 flags2;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+ u8 flags4;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_SHIFT 6
+ u8 flags7;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_EN_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 7
+ u8 flags11;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 word1;
+ __le16 sq_cmp_cons;
+ __le16 sq_cons;
+ __le16 sq_prod;
+ __le16 word5;
+ __le16 conn_dpi;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 lsn;
+ __le32 ssn;
+ __le32 snd_una_psn;
+ __le32 snd_nxt_psn;
+ __le32 reg4;
+ __le32 orq_cons_th;
+ __le32 orq_cons;
+};
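+
+/* Editorial note, not part of the generated firmware HSI: unlike the
+ * generic RDMA variant, this requester context names its fields -
+ * sq_prod/sq_cons/sq_cmp_cons track the send-queue producer and consumer
+ * indices, and the *_psn registers track RoCE packet sequence numbers.
+ * All are little-endian, so a reader would convert, e.g.:
+ *
+ * u32 snd_una = le32_to_cpu(ag->snd_una_psn);
+ *
+ * with "ag" a hypothetical pointer to this structure.
+ */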
+
+struct xstorm_roce_resp_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_SHIFT 7
+ u8 flags1;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
+ u8 flags2;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 6
+ u8 flags3;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+ u8 flags4;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_SHIFT 6
+ u8 flags7;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 7
+ u8 flags11;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 word1;
+ __le16 irq_prod;
+ __le16 word3;
+ __le16 word4;
+ __le16 word5;
+ __le16 irq_cons;
+ u8 rxmit_opcode;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 rxmit_psn_and_id;
+ __le32 rxmit_bytes_length;
+ __le32 psn;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 msn_and_syndrome;
+};
+
+struct ystorm_roce_req_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le32 reg0;
+ __le32 reg1;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg2;
+ __le32 reg3;
+};
+
+struct ystorm_roce_resp_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le32 reg0;
+ __le32 reg1;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg2;
+ __le32 reg3;
+};
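Editor's note: every *_MASK/*_SHIFT pair above (and throughout the remaining context structures in this patch) packs a sub-field of one to two bits into a single flags byte. A minimal sketch of the access pattern, assuming local helpers along the lines of the GET_FIELD()/SET_FIELD() macros the qed driver keeps in qed.h (names here are local to this example):

#define GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)
#define SET_FIELD(value, name, val) \
	do { \
		(value) &= ~(name##_MASK << name##_SHIFT); \
		(value) |= ((val) & name##_MASK) << name##_SHIFT; \
	} while (0)

/* Example: program the 2-bit CF23 field (bits 7:6 of flags14 in the
 * xstorm_roce_resp_conn_ag_ctx above) and hand back the byte.
 * xstorm_roce_resp_set_cf23(0, 2) returns 0x80.
 */
static inline u8 xstorm_roce_resp_set_cf23(u8 flags14, u8 val)
{
	SET_FIELD(flags14, XSTORM_ROCE_RESP_CONN_AG_CTX_CF23, val);
	return flags14;
}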
+
+struct ystorm_iscsi_conn_st_ctx {
+ __le32 reserved[4];
+};
+
+struct pstorm_iscsi_tcp_conn_st_ctx {
+ __le32 tcp[32];
+ __le32 iscsi[4];
+};
+
+struct xstorm_iscsi_tcp_conn_st_ctx {
+ __le32 reserved_iscsi[40];
+ __le32 reserved_tcp[4];
+};
+
+struct xstorm_iscsi_conn_ag_ctx {
+ u8 cdu_validation;
+ u8 state;
+ u8 flags0;
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_SHIFT 7
+ u8 flags1;
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_SHIFT 7
+ u8 flags2;
+#define XSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
+ u8 flags3;
+#define XSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_ISCSI_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_SHIFT 6
+ u8 flags6;
+#define XSTORM_ISCSI_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
+ u8 flags7;
+#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT 7
+ u8 flags11;
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 physical_q1;
+ __le16 dummy_dorq_var;
+ __le16 sq_cons;
+ __le16 sq_prod;
+ __le16 word5;
+ __le16 slow_io_total_data_tx_update;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 more_to_send_seq;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 hq_scan_next_relevant_ack;
+ __le16 r2tq_prod;
+ __le16 r2tq_cons;
+ __le16 hq_prod;
+ __le16 hq_cons;
+ __le32 remain_seq;
+ __le32 bytes_to_next_pdu;
+ __le32 hq_tcp_seq;
+ u8 byte7;
+ u8 byte8;
+ u8 byte9;
+ u8 byte10;
+ u8 byte11;
+ u8 byte12;
+ u8 byte13;
+ u8 byte14;
+ u8 byte15;
+ u8 byte16;
+ __le16 word11;
+ __le32 reg10;
+ __le32 reg11;
+ __le32 exp_stat_sn;
+ __le32 reg13;
+ __le32 reg14;
+ __le32 reg15;
+ __le32 reg16;
+ __le32 reg17;
+};
+
+struct tstorm_iscsi_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6
+ u8 flags1;
+#define TSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 6
+ u8 flags2;
+#define TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5
+#define TSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le32 reg7;
+ __le32 reg8;
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+};
+
+struct ustorm_iscsi_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define USTORM_ISCSI_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 6
+ u8 flags2;
+#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le16 word2;
+ __le16 word3;
+};
+
+struct tstorm_iscsi_conn_st_ctx {
+ __le32 reserved[40];
+};
+
+struct mstorm_iscsi_conn_ag_ctx {
+ u8 reserved;
+ u8 state;
+ u8 flags0;
+#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+};
+
+struct mstorm_iscsi_tcp_conn_st_ctx {
+ __le32 reserved_tcp[20];
+ __le32 reserved_iscsi[8];
+};
+
+struct ustorm_iscsi_conn_st_ctx {
+ __le32 reserved[52];
+};
+
+struct iscsi_conn_context {
+ struct ystorm_iscsi_conn_st_ctx ystorm_st_context;
+ struct regpair ystorm_st_padding[2];
+ struct pstorm_iscsi_tcp_conn_st_ctx pstorm_st_context;
+ struct regpair pstorm_st_padding[2];
+ struct pb_context xpb2_context;
+ struct xstorm_iscsi_tcp_conn_st_ctx xstorm_st_context;
+ struct regpair xstorm_st_padding[2];
+ struct xstorm_iscsi_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_iscsi_conn_ag_ctx tstorm_ag_context;
+ struct regpair tstorm_ag_padding[2];
+ struct timers_context timer_context;
+ struct ustorm_iscsi_conn_ag_ctx ustorm_ag_context;
+ struct pb_context upb_context;
+ struct tstorm_iscsi_conn_st_ctx tstorm_st_context;
+ struct regpair tstorm_st_padding[2];
+ struct mstorm_iscsi_conn_ag_ctx mstorm_ag_context;
+ struct mstorm_iscsi_tcp_conn_st_ctx mstorm_st_context;
+ struct ustorm_iscsi_conn_st_ctx ustorm_st_context;
+};
+
+struct iscsi_init_ramrod_params {
+ struct iscsi_spe_func_init iscsi_init_spe;
+ struct tcp_init_params tcp_init;
+};
+
+struct ystorm_iscsi_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le32 reg0;
+ __le32 reg1;
+ __le16 word1;
+ __le16 word2;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg2;
+ __le32 reg3;
+};
+#define VF_MAX_STATIC 192
+
+#define MCP_GLOB_PATH_MAX 2
+#define MCP_PORT_MAX 2
+#define MCP_GLOB_PORT_MAX 4
+#define MCP_GLOB_FUNC_MAX 16
-typedef u32 offsize_t; /* In DWORDS !!! */
/* Offset from the beginning of the MCP scratchpad */
-#define OFFSIZE_OFFSET_SHIFT 0
-#define OFFSIZE_OFFSET_MASK 0x0000ffff
+#define OFFSIZE_OFFSET_SHIFT 0
+#define OFFSIZE_OFFSET_MASK 0x0000ffff
/* Size of specific element (not the whole array if any) */
-#define OFFSIZE_SIZE_SHIFT 16
-#define OFFSIZE_SIZE_MASK 0xffff0000
+#define OFFSIZE_SIZE_SHIFT 16
+#define OFFSIZE_SIZE_MASK 0xffff0000
-/* SECTION_OFFSET is calculating the offset in bytes out of offsize */
-#define SECTION_OFFSET(_offsize) ((((_offsize & \
- OFFSIZE_OFFSET_MASK) >> \
- OFFSIZE_OFFSET_SHIFT) << 2))
+#define SECTION_OFFSET(_offsize) ((((_offsize & \
+ OFFSIZE_OFFSET_MASK) >> \
+ OFFSIZE_OFFSET_SHIFT) << 2))
-/* QED_SECTION_SIZE is calculating the size in bytes out of offsize */
-#define QED_SECTION_SIZE(_offsize) (((_offsize & \
- OFFSIZE_SIZE_MASK) >> \
- OFFSIZE_SIZE_SHIFT) << 2)
+#define QED_SECTION_SIZE(_offsize) (((_offsize & \
+ OFFSIZE_SIZE_MASK) >> \
+ OFFSIZE_SIZE_SHIFT) << 2)
-/* SECTION_ADDR returns the GRC addr of a section, given offsize and index
- * within section.
- */
-#define SECTION_ADDR(_offsize, idx) (MCP_REG_SCRATCH + \
- SECTION_OFFSET(_offsize) + \
- (QED_SECTION_SIZE(_offsize) * idx))
+#define SECTION_ADDR(_offsize, idx) (MCP_REG_SCRATCH + \
+ SECTION_OFFSET(_offsize) + \
+ (QED_SECTION_SIZE(_offsize) * idx))
+
+#define SECTION_OFFSIZE_ADDR(_pub_base, _section) \
+ (_pub_base + offsetof(struct mcp_public_data, sections[_section]))
-/* SECTION_OFFSIZE_ADDR returns the GRC addr to the offsize address.
- * Use offsetof, since the OFFSETUP collide with the firmware definition
- */
-#define SECTION_OFFSIZE_ADDR(_pub_base, _section) (_pub_base + \
- offsetof(struct \
- mcp_public_data, \
- sections[_section]))
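Editor's note: both halves of an offsize word count in dwords — the low 16 bits give the offset from the scratchpad base and the high 16 bits the size of one array element — so the macros above shift left by 2 to convert to bytes. A worked example with invented values (MCP_REG_SCRATCH is the scratchpad GRC base defined elsewhere in the driver; 0xe20000 is assumed here):

/*   offsize = (8 << OFFSIZE_SIZE_SHIFT) | 0x40
 *             -> 8 dwords per element, 0x40 dwords from the base
 *
 *   SECTION_OFFSET(offsize)   == 0x40 << 2 == 0x100 bytes
 *   QED_SECTION_SIZE(offsize) == 8 << 2    == 32 bytes per element
 *   SECTION_ADDR(offsize, 2)  == 0xe20000 + 0x100 + 2 * 32 == 0xe20140
 */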
/* PHY configuration */
-struct pmm_phy_cfg {
- u32 speed;
-#define PMM_SPEED_AUTONEG 0
-
- u32 pause; /* bitmask */
-#define PMM_PAUSE_NONE 0x0
-#define PMM_PAUSE_AUTONEG 0x1
-#define PMM_PAUSE_RX 0x2
-#define PMM_PAUSE_TX 0x4
-
- u32 adv_speed; /* Default should be the speed_cap_mask */
- u32 loopback_mode;
-#define PMM_LOOPBACK_NONE 0
-#define PMM_LOOPBACK_INT_PHY 1
-#define PMM_LOOPBACK_EXT_PHY 2
-#define PMM_LOOPBACK_EXT 3
-#define PMM_LOOPBACK_MAC 4
-
- /* features */
+struct eth_phy_cfg {
+ u32 speed;
+#define ETH_SPEED_AUTONEG 0
+#define ETH_SPEED_SMARTLINQ 0x8
+
+ u32 pause;
+#define ETH_PAUSE_NONE 0x0
+#define ETH_PAUSE_AUTONEG 0x1
+#define ETH_PAUSE_RX 0x2
+#define ETH_PAUSE_TX 0x4
+
+ u32 adv_speed;
+ u32 loopback_mode;
+#define ETH_LOOPBACK_NONE (0)
+#define ETH_LOOPBACK_INT_PHY (1)
+#define ETH_LOOPBACK_EXT_PHY (2)
+#define ETH_LOOPBACK_EXT (3)
+#define ETH_LOOPBACK_MAC (4)
+
u32 feature_config_flags;
+#define ETH_EEE_MODE_ADV_LPI (1 << 0)
};
struct port_mf_cfg {
- u32 dynamic_cfg; /* device control channel */
-#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff
-#define PORT_MF_CFG_OV_TAG_SHIFT 0
-#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK
-
- u32 reserved[1];
-};
-
-/* DO NOT add new fields in the middle
- * MUST be synced with struct pmm_stats_map
- */
-struct pmm_stats {
- u64 r64; /* 0x00 (Offset 0x00 ) RX 64-byte frame counter*/
- u64 r127; /* 0x01 (Offset 0x08 ) RX 65 to 127 byte frame counter*/
- u64 r255;
- u64 r511;
- u64 r1023;
- u64 r1518;
- u64 r1522;
- u64 r2047;
- u64 r4095;
- u64 r9216;
- u64 r16383;
- u64 rfcs; /* 0x0F (Offset 0x58 ) RX FCS error frame counter*/
- u64 rxcf; /* 0x10 (Offset 0x60 ) RX control frame counter*/
- u64 rxpf; /* 0x11 (Offset 0x68 ) RX pause frame counter*/
- u64 rxpp; /* 0x12 (Offset 0x70 ) RX PFC frame counter*/
- u64 raln; /* 0x16 (Offset 0x78 ) RX alignment error counter*/
- u64 rfcr; /* 0x19 (Offset 0x80 ) RX false carrier counter */
- u64 rovr; /* 0x1A (Offset 0x88 ) RX oversized frame counter*/
- u64 rjbr; /* 0x1B (Offset 0x90 ) RX jabber frame counter */
- u64 rund; /* 0x34 (Offset 0x98 ) RX undersized frame counter */
- u64 rfrg; /* 0x35 (Offset 0xa0 ) RX fragment counter */
- u64 t64; /* 0x40 (Offset 0xa8 ) TX 64-byte frame counter */
- u64 t127;
- u64 t255;
- u64 t511;
- u64 t1023;
- u64 t1518;
- u64 t2047;
- u64 t4095;
- u64 t9216;
- u64 t16383;
- u64 txpf; /* 0x50 (Offset 0xf8 ) TX pause frame counter */
- u64 txpp; /* 0x51 (Offset 0x100) TX PFC frame counter */
- u64 tlpiec;
- u64 tncl;
- u64 rbyte; /* 0x3d (Offset 0x118) RX byte counter */
- u64 rxuca; /* 0x0c (Offset 0x120) RX UC frame counter */
- u64 rxmca; /* 0x0d (Offset 0x128) RX MC frame counter */
- u64 rxbca; /* 0x0e (Offset 0x130) RX BC frame counter */
- u64 rxpok;
- u64 tbyte; /* 0x6f (Offset 0x140) TX byte counter */
- u64 txuca; /* 0x4d (Offset 0x148) TX UC frame counter */
- u64 txmca; /* 0x4e (Offset 0x150) TX MC frame counter */
- u64 txbca; /* 0x4f (Offset 0x158) TX BC frame counter */
- u64 txcf; /* 0x54 (Offset 0x160) TX control frame counter */
+ u32 dynamic_cfg;
+#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff
+#define PORT_MF_CFG_OV_TAG_SHIFT 0
+#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK
+
+ u32 reserved[1];
+};
+
+struct eth_stats {
+ u64 r64;
+ u64 r127;
+ u64 r255;
+ u64 r511;
+ u64 r1023;
+ u64 r1518;
+ u64 r1522;
+ u64 r2047;
+ u64 r4095;
+ u64 r9216;
+ u64 r16383;
+ u64 rfcs;
+ u64 rxcf;
+ u64 rxpf;
+ u64 rxpp;
+ u64 raln;
+ u64 rfcr;
+ u64 rovr;
+ u64 rjbr;
+ u64 rund;
+ u64 rfrg;
+ u64 t64;
+ u64 t127;
+ u64 t255;
+ u64 t511;
+ u64 t1023;
+ u64 t1518;
+ u64 t2047;
+ u64 t4095;
+ u64 t9216;
+ u64 t16383;
+ u64 txpf;
+ u64 txpp;
+ u64 tlpiec;
+ u64 tncl;
+ u64 rbyte;
+ u64 rxuca;
+ u64 rxmca;
+ u64 rxbca;
+ u64 rxpok;
+ u64 tbyte;
+ u64 txuca;
+ u64 txmca;
+ u64 txbca;
+ u64 txcf;
};
struct brb_stats {
- u64 brb_truncate[8];
- u64 brb_discard[8];
+ u64 brb_truncate[8];
+ u64 brb_discard[8];
};
struct port_stats {
- struct brb_stats brb;
- struct pmm_stats pmm;
+ struct brb_stats brb;
+ struct eth_stats eth;
};
-#define CMT_TEAM0 0
-#define CMT_TEAM1 1
-#define CMT_TEAM_MAX 2
-
struct couple_mode_teaming {
u8 port_cmt[MCP_GLOB_PORT_MAX];
-#define PORT_CMT_IN_TEAM BIT(0)
+#define PORT_CMT_IN_TEAM (1 << 0)
-#define PORT_CMT_PORT_ROLE BIT(1)
-#define PORT_CMT_PORT_INACTIVE (0 << 1)
-#define PORT_CMT_PORT_ACTIVE BIT(1)
+#define PORT_CMT_PORT_ROLE (1 << 1)
+#define PORT_CMT_PORT_INACTIVE (0 << 1)
+#define PORT_CMT_PORT_ACTIVE (1 << 1)
-#define PORT_CMT_TEAM_MASK BIT(2)
-#define PORT_CMT_TEAM0 (0 << 2)
-#define PORT_CMT_TEAM1 BIT(2)
+#define PORT_CMT_TEAM_MASK (1 << 2)
+#define PORT_CMT_TEAM0 (0 << 2)
+#define PORT_CMT_TEAM1 (1 << 2)
};
-/**************************************
-* LLDP and DCBX HSI structures
-**************************************/
-#define LLDP_CHASSIS_ID_STAT_LEN 4
-#define LLDP_PORT_ID_STAT_LEN 4
-#define DCBX_MAX_APP_PROTOCOL 32
-#define MAX_SYSTEM_LLDP_TLV_DATA 32
+#define LLDP_CHASSIS_ID_STAT_LEN 4
+#define LLDP_PORT_ID_STAT_LEN 4
+#define DCBX_MAX_APP_PROTOCOL 32
+#define MAX_SYSTEM_LLDP_TLV_DATA 32
-enum lldp_agent_e {
+enum _lldp_agent {
LLDP_NEAREST_BRIDGE = 0,
LLDP_NEAREST_NON_TPMR_BRIDGE,
LLDP_NEAREST_CUSTOMER_BRIDGE,
@@ -3280,676 +6786,525 @@ enum lldp_agent_e {
struct lldp_config_params_s {
u32 config;
-#define LLDP_CONFIG_TX_INTERVAL_MASK 0x000000ff
-#define LLDP_CONFIG_TX_INTERVAL_SHIFT 0
-#define LLDP_CONFIG_HOLD_MASK 0x00000f00
-#define LLDP_CONFIG_HOLD_SHIFT 8
-#define LLDP_CONFIG_MAX_CREDIT_MASK 0x0000f000
-#define LLDP_CONFIG_MAX_CREDIT_SHIFT 12
-#define LLDP_CONFIG_ENABLE_RX_MASK 0x40000000
-#define LLDP_CONFIG_ENABLE_RX_SHIFT 30
-#define LLDP_CONFIG_ENABLE_TX_MASK 0x80000000
-#define LLDP_CONFIG_ENABLE_TX_SHIFT 31
- u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
- u32 local_port_id[LLDP_PORT_ID_STAT_LEN];
+#define LLDP_CONFIG_TX_INTERVAL_MASK 0x000000ff
+#define LLDP_CONFIG_TX_INTERVAL_SHIFT 0
+#define LLDP_CONFIG_HOLD_MASK 0x00000f00
+#define LLDP_CONFIG_HOLD_SHIFT 8
+#define LLDP_CONFIG_MAX_CREDIT_MASK 0x0000f000
+#define LLDP_CONFIG_MAX_CREDIT_SHIFT 12
+#define LLDP_CONFIG_ENABLE_RX_MASK 0x40000000
+#define LLDP_CONFIG_ENABLE_RX_SHIFT 30
+#define LLDP_CONFIG_ENABLE_TX_MASK 0x80000000
+#define LLDP_CONFIG_ENABLE_TX_SHIFT 31
+ u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+ u32 local_port_id[LLDP_PORT_ID_STAT_LEN];
};
struct lldp_status_params_s {
- u32 prefix_seq_num;
- u32 status; /* TBD */
-
- /* Holds remote Chassis ID TLV header, subtype and 9B of payload. */
- u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
-
- /* Holds remote Port ID TLV header, subtype and 9B of payload. */
- u32 peer_port_id[LLDP_PORT_ID_STAT_LEN];
- u32 suffix_seq_num;
+ u32 prefix_seq_num;
+ u32 status;
+ u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+ u32 peer_port_id[LLDP_PORT_ID_STAT_LEN];
+ u32 suffix_seq_num;
};
struct dcbx_ets_feature {
u32 flags;
-#define DCBX_ETS_ENABLED_MASK 0x00000001
-#define DCBX_ETS_ENABLED_SHIFT 0
-#define DCBX_ETS_WILLING_MASK 0x00000002
-#define DCBX_ETS_WILLING_SHIFT 1
-#define DCBX_ETS_ERROR_MASK 0x00000004
-#define DCBX_ETS_ERROR_SHIFT 2
-#define DCBX_ETS_CBS_MASK 0x00000008
-#define DCBX_ETS_CBS_SHIFT 3
-#define DCBX_ETS_MAX_TCS_MASK 0x000000f0
-#define DCBX_ETS_MAX_TCS_SHIFT 4
- u32 pri_tc_tbl[1];
-#define DCBX_ISCSI_OOO_TC 4
-#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_ISCSI_OOO_TC + 1)
- u32 tc_bw_tbl[2];
- u32 tc_tsa_tbl[2];
-#define DCBX_ETS_TSA_STRICT 0
-#define DCBX_ETS_TSA_CBS 1
-#define DCBX_ETS_TSA_ETS 2
+#define DCBX_ETS_ENABLED_MASK 0x00000001
+#define DCBX_ETS_ENABLED_SHIFT 0
+#define DCBX_ETS_WILLING_MASK 0x00000002
+#define DCBX_ETS_WILLING_SHIFT 1
+#define DCBX_ETS_ERROR_MASK 0x00000004
+#define DCBX_ETS_ERROR_SHIFT 2
+#define DCBX_ETS_CBS_MASK 0x00000008
+#define DCBX_ETS_CBS_SHIFT 3
+#define DCBX_ETS_MAX_TCS_MASK 0x000000f0
+#define DCBX_ETS_MAX_TCS_SHIFT 4
+#define DCBX_ISCSI_OOO_TC_MASK 0x00000f00
+#define DCBX_ISCSI_OOO_TC_SHIFT 8
+ u32 pri_tc_tbl[1];
+#define DCBX_ISCSI_OOO_TC (4)
+
+#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_ISCSI_OOO_TC + 1)
+#define DCBX_CEE_STRICT_PRIORITY 0xf
+ u32 tc_bw_tbl[2];
+ u32 tc_tsa_tbl[2];
+#define DCBX_ETS_TSA_STRICT 0
+#define DCBX_ETS_TSA_CBS 1
+#define DCBX_ETS_TSA_ETS 2
};
struct dcbx_app_priority_entry {
u32 entry;
-#define DCBX_APP_PRI_MAP_MASK 0x000000ff
-#define DCBX_APP_PRI_MAP_SHIFT 0
-#define DCBX_APP_PRI_0 0x01
-#define DCBX_APP_PRI_1 0x02
-#define DCBX_APP_PRI_2 0x04
-#define DCBX_APP_PRI_3 0x08
-#define DCBX_APP_PRI_4 0x10
-#define DCBX_APP_PRI_5 0x20
-#define DCBX_APP_PRI_6 0x40
-#define DCBX_APP_PRI_7 0x80
-#define DCBX_APP_SF_MASK 0x00000300
-#define DCBX_APP_SF_SHIFT 8
-#define DCBX_APP_SF_ETHTYPE 0
-#define DCBX_APP_SF_PORT 1
-#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000
-#define DCBX_APP_PROTOCOL_ID_SHIFT 16
-};
-
-/* FW structure in BE */
+#define DCBX_APP_PRI_MAP_MASK 0x000000ff
+#define DCBX_APP_PRI_MAP_SHIFT 0
+#define DCBX_APP_PRI_0 0x01
+#define DCBX_APP_PRI_1 0x02
+#define DCBX_APP_PRI_2 0x04
+#define DCBX_APP_PRI_3 0x08
+#define DCBX_APP_PRI_4 0x10
+#define DCBX_APP_PRI_5 0x20
+#define DCBX_APP_PRI_6 0x40
+#define DCBX_APP_PRI_7 0x80
+#define DCBX_APP_SF_MASK 0x00000300
+#define DCBX_APP_SF_SHIFT 8
+#define DCBX_APP_SF_ETHTYPE 0
+#define DCBX_APP_SF_PORT 1
+#define DCBX_APP_SF_IEEE_MASK 0x0000f000
+#define DCBX_APP_SF_IEEE_SHIFT 12
+#define DCBX_APP_SF_IEEE_RESERVED 0
+#define DCBX_APP_SF_IEEE_ETHTYPE 1
+#define DCBX_APP_SF_IEEE_TCP_PORT 2
+#define DCBX_APP_SF_IEEE_UDP_PORT 3
+#define DCBX_APP_SF_IEEE_TCP_UDP_PORT 4
+
+#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000
+#define DCBX_APP_PROTOCOL_ID_SHIFT 16
+};
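Editor's note: each app-table entry packs the priority bitmap (bits 7:0), the selector field (bits 9:8, with the IEEE selector at bits 15:12) and the protocol ID (bits 31:16) into a single word. A worked example with invented values:

/* e.g. iSCSI on TCP port 3260 (0xcbc) mapped to priority 4:
 *
 *   entry = DCBX_APP_PRI_4
 *         | (DCBX_APP_SF_PORT << DCBX_APP_SF_SHIFT)
 *         | (3260 << DCBX_APP_PROTOCOL_ID_SHIFT)
 *         == 0x0cbc0110
 */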
+
struct dcbx_app_priority_feature {
u32 flags;
-#define DCBX_APP_ENABLED_MASK 0x00000001
-#define DCBX_APP_ENABLED_SHIFT 0
-#define DCBX_APP_WILLING_MASK 0x00000002
-#define DCBX_APP_WILLING_SHIFT 1
-#define DCBX_APP_ERROR_MASK 0x00000004
-#define DCBX_APP_ERROR_SHIFT 2
-/* Not in use
- * #define DCBX_APP_DEFAULT_PRI_MASK 0x00000f00
- * #define DCBX_APP_DEFAULT_PRI_SHIFT 8
- */
-#define DCBX_APP_MAX_TCS_MASK 0x0000f000
-#define DCBX_APP_MAX_TCS_SHIFT 12
-#define DCBX_APP_NUM_ENTRIES_MASK 0x00ff0000
-#define DCBX_APP_NUM_ENTRIES_SHIFT 16
+#define DCBX_APP_ENABLED_MASK 0x00000001
+#define DCBX_APP_ENABLED_SHIFT 0
+#define DCBX_APP_WILLING_MASK 0x00000002
+#define DCBX_APP_WILLING_SHIFT 1
+#define DCBX_APP_ERROR_MASK 0x00000004
+#define DCBX_APP_ERROR_SHIFT 2
+#define DCBX_APP_MAX_TCS_MASK 0x0000f000
+#define DCBX_APP_MAX_TCS_SHIFT 12
+#define DCBX_APP_NUM_ENTRIES_MASK 0x00ff0000
+#define DCBX_APP_NUM_ENTRIES_SHIFT 16
struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
};
-/* FW structure in BE */
struct dcbx_features {
- /* PG feature */
struct dcbx_ets_feature ets;
+ u32 pfc;
+#define DCBX_PFC_PRI_EN_BITMAP_MASK 0x000000ff
+#define DCBX_PFC_PRI_EN_BITMAP_SHIFT 0
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_0 0x01
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_1 0x02
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_2 0x04
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_3 0x08
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_4 0x10
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_5 0x20
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_6 0x40
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_7 0x80
+
+#define DCBX_PFC_FLAGS_MASK 0x0000ff00
+#define DCBX_PFC_FLAGS_SHIFT 8
+#define DCBX_PFC_CAPS_MASK 0x00000f00
+#define DCBX_PFC_CAPS_SHIFT 8
+#define DCBX_PFC_MBC_MASK 0x00004000
+#define DCBX_PFC_MBC_SHIFT 14
+#define DCBX_PFC_WILLING_MASK 0x00008000
+#define DCBX_PFC_WILLING_SHIFT 15
+#define DCBX_PFC_ENABLED_MASK 0x00010000
+#define DCBX_PFC_ENABLED_SHIFT 16
+#define DCBX_PFC_ERROR_MASK 0x00020000
+#define DCBX_PFC_ERROR_SHIFT 17
- /* PFC feature */
- u32 pfc;
-#define DCBX_PFC_PRI_EN_BITMAP_MASK 0x000000ff
-#define DCBX_PFC_PRI_EN_BITMAP_SHIFT 0
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_0 0x01
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_1 0x02
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_2 0x04
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_3 0x08
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_4 0x10
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_5 0x20
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_6 0x40
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_7 0x80
-
-#define DCBX_PFC_FLAGS_MASK 0x0000ff00
-#define DCBX_PFC_FLAGS_SHIFT 8
-#define DCBX_PFC_CAPS_MASK 0x00000f00
-#define DCBX_PFC_CAPS_SHIFT 8
-#define DCBX_PFC_MBC_MASK 0x00004000
-#define DCBX_PFC_MBC_SHIFT 14
-#define DCBX_PFC_WILLING_MASK 0x00008000
-#define DCBX_PFC_WILLING_SHIFT 15
-#define DCBX_PFC_ENABLED_MASK 0x00010000
-#define DCBX_PFC_ENABLED_SHIFT 16
-#define DCBX_PFC_ERROR_MASK 0x00020000
-#define DCBX_PFC_ERROR_SHIFT 17
-
- /* APP feature */
struct dcbx_app_priority_feature app;
};
struct dcbx_local_params {
u32 config;
-#define DCBX_CONFIG_VERSION_MASK 0x00000003
-#define DCBX_CONFIG_VERSION_SHIFT 0
-#define DCBX_CONFIG_VERSION_DISABLED 0
-#define DCBX_CONFIG_VERSION_IEEE 1
-#define DCBX_CONFIG_VERSION_CEE 2
+#define DCBX_CONFIG_VERSION_MASK 0x00000007
+#define DCBX_CONFIG_VERSION_SHIFT 0
+#define DCBX_CONFIG_VERSION_DISABLED 0
+#define DCBX_CONFIG_VERSION_IEEE 1
+#define DCBX_CONFIG_VERSION_CEE 2
+#define DCBX_CONFIG_VERSION_STATIC 4
- u32 flags;
- struct dcbx_features features;
+ u32 flags;
+ struct dcbx_features features;
};
struct dcbx_mib {
- u32 prefix_seq_num;
- u32 flags;
- struct dcbx_features features;
- u32 suffix_seq_num;
+ u32 prefix_seq_num;
+ u32 flags;
+ struct dcbx_features features;
+ u32 suffix_seq_num;
};
struct lldp_system_tlvs_buffer_s {
- u16 valid;
- u16 length;
- u32 data[MAX_SYSTEM_LLDP_TLV_DATA];
+ u16 valid;
+ u16 length;
+ u32 data[MAX_SYSTEM_LLDP_TLV_DATA];
};
-/**************************************/
-/* */
-/* P U B L I C G L O B A L */
-/* */
-/**************************************/
-struct public_global {
- u32 max_path;
-#define MAX_PATH_BIG_BEAR 2
-#define MAX_PATH_K2 1
- u32 max_ports;
-#define MODE_1P 1
-#define MODE_2P 2
-#define MODE_3P 3
-#define MODE_4P 4
- u32 debug_mb_offset;
- u32 phymod_dbg_mb_offset;
- struct couple_mode_teaming cmt;
- s32 internal_temperature;
- u32 mfw_ver;
- u32 running_bundle_id;
+struct dcb_dscp_map {
+ u32 flags;
+#define DCB_DSCP_ENABLE_MASK 0x1
+#define DCB_DSCP_ENABLE_SHIFT 0
+#define DCB_DSCP_ENABLE 1
+ u32 dscp_pri_map[8];
};
-/**************************************/
-/* */
-/* P U B L I C P A T H */
-/* */
-/**************************************/
+struct public_global {
+ u32 max_path;
+ u32 max_ports;
+ u32 debug_mb_offset;
+ u32 phymod_dbg_mb_offset;
+ struct couple_mode_teaming cmt;
+ s32 internal_temperature;
+ u32 mfw_ver;
+ u32 running_bundle_id;
+ s32 external_temperature;
+ u32 mdump_reason;
+};
-/****************************************************************************
-* Shared Memory 2 Region *
-****************************************************************************/
-/* The fw_flr_ack is actually built in the following way: */
-/* 8 bit: PF ack */
-/* 128 bit: VF ack */
-/* 8 bit: ios_dis_ack */
-/* In order to maintain endianity in the mailbox hsi, we want to keep using */
-/* u32. The fw must have the VF right after the PF since this is how it */
-/* access arrays(it expects always the VF to reside after the PF, and that */
-/* makes the calculation much easier for it. ) */
-/* In order to answer both limitations, and keep the struct small, the code */
-/* will abuse the structure defined here to achieve the actual partition */
-/* above */
-/****************************************************************************/
struct fw_flr_mb {
- u32 aggint;
- u32 opgen_addr;
- u32 accum_ack; /* 0..15:PF, 16..207:VF, 256..271:IOV_DIS */
-#define ACCUM_ACK_PF_BASE 0
-#define ACCUM_ACK_PF_SHIFT 0
-
-#define ACCUM_ACK_VF_BASE 8
-#define ACCUM_ACK_VF_SHIFT 3
-
-#define ACCUM_ACK_IOV_DIS_BASE 256
-#define ACCUM_ACK_IOV_DIS_SHIFT 8
+ u32 aggint;
+ u32 opgen_addr;
+ u32 accum_ack;
};
struct public_path {
- struct fw_flr_mb flr_mb;
- u32 mcp_vf_disabled[VF_MAX_STATIC / 32];
-
- u32 process_kill;
-#define PROCESS_KILL_COUNTER_MASK 0x0000ffff
-#define PROCESS_KILL_COUNTER_SHIFT 0
-#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000
-#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT 16
+ struct fw_flr_mb flr_mb;
+ u32 mcp_vf_disabled[VF_MAX_STATIC / 32];
+
+ u32 process_kill;
+#define PROCESS_KILL_COUNTER_MASK 0x0000ffff
+#define PROCESS_KILL_COUNTER_SHIFT 0
+#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000
+#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT 16
#define GLOBAL_AEU_BIT(aeu_reg_id, aeu_bit) (aeu_reg_id * 32 + aeu_bit)
};
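Editor's note: GLOBAL_AEU_BIT() flattens a (32-bit AEU register, bit) pair into the single index carried by the PROCESS_KILL_GLOB_AEU_BIT field above; for instance:

/* bit 5 of AEU register 3 -> GLOBAL_AEU_BIT(3, 5) == 3 * 32 + 5 == 101 */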
-/**************************************/
-/* */
-/* P U B L I C P O R T */
-/* */
-/**************************************/
-
-/****************************************************************************
-* Driver <-> FW Mailbox *
-****************************************************************************/
-
struct public_port {
- u32 validity_map; /* 0x0 (4*2 = 0x8) */
-
- /* validity bits */
-#define MCP_VALIDITY_PCI_CFG 0x00100000
-#define MCP_VALIDITY_MB 0x00200000
-#define MCP_VALIDITY_DEV_INFO 0x00400000
-#define MCP_VALIDITY_RESERVED 0x00000007
-
- /* One licensing bit should be set */
-#define MCP_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038
-#define MCP_VALIDITY_LIC_MANUF_KEY_IN_EFFECT 0x00000008
-#define MCP_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT 0x00000010
-#define MCP_VALIDITY_LIC_NO_KEY_IN_EFFECT 0x00000020
-
- /* Active MFW */
-#define MCP_VALIDITY_ACTIVE_MFW_UNKNOWN 0x00000000
-#define MCP_VALIDITY_ACTIVE_MFW_MASK 0x000001c0
-#define MCP_VALIDITY_ACTIVE_MFW_NCSI 0x00000040
-#define MCP_VALIDITY_ACTIVE_MFW_NONE 0x000001c0
+ u32 validity_map;
u32 link_status;
-#define LINK_STATUS_LINK_UP \
- 0x00000001
-#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e
-#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD BIT(1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1)
-
-#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020
-
-#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040
-#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080
-
-#define LINK_STATUS_PFC_ENABLED \
- 0x00000100
-#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200
-#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400
-#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800
-#define LINK_STATUS_LINK_PARTNER_20G_CAPABLE 0x00001000
-#define LINK_STATUS_LINK_PARTNER_40G_CAPABLE 0x00002000
-#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE 0x00004000
-#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE 0x00008000
-#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE 0x00010000
-
-#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000
-#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0 << 18)
-#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE BIT(18)
-#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2 << 18)
-#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18)
-
-#define LINK_STATUS_SFP_TX_FAULT \
- 0x00100000
-#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000
-#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000
-
- u32 link_status1;
- u32 ext_phy_fw_version;
- u32 drv_phy_cfg_addr;
-
- u32 port_stx;
-
- u32 stat_nig_timer;
-
- struct port_mf_cfg port_mf_config;
- struct port_stats stats;
-
- u32 media_type;
-#define MEDIA_UNSPECIFIED 0x0
-#define MEDIA_SFPP_10G_FIBER 0x1
-#define MEDIA_XFP_FIBER 0x2
-#define MEDIA_DA_TWINAX 0x3
-#define MEDIA_BASE_T 0x4
-#define MEDIA_SFP_1G_FIBER 0x5
-#define MEDIA_KR 0xf0
-#define MEDIA_NOT_PRESENT 0xff
+#define LINK_STATUS_LINK_UP 0x00000001
+#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (1 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1)
+
+#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020
+
+#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040
+#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080
+
+#define LINK_STATUS_PFC_ENABLED 0x00000100
+#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200
+#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400
+#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800
+#define LINK_STATUS_LINK_PARTNER_20G_CAPABLE 0x00001000
+#define LINK_STATUS_LINK_PARTNER_40G_CAPABLE 0x00002000
+#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE 0x00004000
+#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE 0x00008000
+#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE 0x00010000
+
+#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000
+#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0 << 18)
+#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1 << 18)
+#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2 << 18)
+#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18)
+
+#define LINK_STATUS_SFP_TX_FAULT 0x00100000
+#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000
+#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000
+#define LINK_STATUS_RX_SIGNAL_PRESENT 0x00800000
+#define LINK_STATUS_MAC_LOCAL_FAULT 0x01000000
+#define LINK_STATUS_MAC_REMOTE_FAULT 0x02000000
+#define LINK_STATUS_UNSUPPORTED_SPD_REQ 0x04000000
+
+ u32 link_status1;
+ u32 ext_phy_fw_version;
+ u32 drv_phy_cfg_addr;
+
+ u32 port_stx;
+
+ u32 stat_nig_timer;
+
+ struct port_mf_cfg port_mf_config;
+ struct port_stats stats;
+
+ u32 media_type;
+#define MEDIA_UNSPECIFIED 0x0
+#define MEDIA_SFPP_10G_FIBER 0x1
+#define MEDIA_XFP_FIBER 0x2
+#define MEDIA_DA_TWINAX 0x3
+#define MEDIA_BASE_T 0x4
+#define MEDIA_SFP_1G_FIBER 0x5
+#define MEDIA_MODULE_FIBER 0x6
+#define MEDIA_KR 0xf0
+#define MEDIA_NOT_PRESENT 0xff
u32 lfa_status;
-#define LFA_LINK_FLAP_REASON_OFFSET 0
-#define LFA_LINK_FLAP_REASON_MASK 0x000000ff
-#define LFA_NO_REASON (0 << 0)
-#define LFA_LINK_DOWN BIT(0)
-#define LFA_FORCE_INIT BIT(1)
-#define LFA_LOOPBACK_MISMATCH BIT(2)
-#define LFA_SPEED_MISMATCH BIT(3)
-#define LFA_FLOW_CTRL_MISMATCH BIT(4)
-#define LFA_ADV_SPEED_MISMATCH BIT(5)
-#define LINK_FLAP_AVOIDANCE_COUNT_OFFSET 8
-#define LINK_FLAP_AVOIDANCE_COUNT_MASK 0x0000ff00
-#define LINK_FLAP_COUNT_OFFSET 16
-#define LINK_FLAP_COUNT_MASK 0x00ff0000
-
- u32 link_change_count;
-
- /* LLDP params */
- struct lldp_config_params_s lldp_config_params[
- LLDP_MAX_LLDP_AGENTS];
- struct lldp_status_params_s lldp_status_params[
- LLDP_MAX_LLDP_AGENTS];
- struct lldp_system_tlvs_buffer_s system_lldp_tlvs_buf;
+ u32 link_change_count;
+
+ struct lldp_config_params_s lldp_config_params[LLDP_MAX_LLDP_AGENTS];
+ struct lldp_status_params_s lldp_status_params[LLDP_MAX_LLDP_AGENTS];
+ struct lldp_system_tlvs_buffer_s system_lldp_tlvs_buf;
/* DCBX related MIB */
- struct dcbx_local_params local_admin_dcbx_mib;
- struct dcbx_mib remote_dcbx_mib;
- struct dcbx_mib operational_dcbx_mib;
+ struct dcbx_local_params local_admin_dcbx_mib;
+ struct dcbx_mib remote_dcbx_mib;
+ struct dcbx_mib operational_dcbx_mib;
- u32 fc_npiv_nvram_tbl_addr;
- u32 fc_npiv_nvram_tbl_size;
- u32 transceiver_data;
-#define PMM_TRANSCEIVER_STATE_MASK 0x000000FF
-#define PMM_TRANSCEIVER_STATE_SHIFT 0x00000000
-#define PMM_TRANSCEIVER_STATE_PRESENT 0x00000001
-};
+ u32 reserved[2];
+ u32 transceiver_data;
+#define ETH_TRANSCEIVER_STATE_MASK 0x000000FF
+#define ETH_TRANSCEIVER_STATE_SHIFT 0x00000000
+#define ETH_TRANSCEIVER_STATE_UNPLUGGED 0x00000000
+#define ETH_TRANSCEIVER_STATE_PRESENT 0x00000001
+#define ETH_TRANSCEIVER_STATE_VALID 0x00000003
+#define ETH_TRANSCEIVER_STATE_UPDATING 0x00000008
-/**************************************/
-/* */
-/* P U B L I C F U N C */
-/* */
-/**************************************/
+ u32 wol_info;
+ u32 wol_pkt_len;
+ u32 wol_pkt_details;
+ struct dcb_dscp_map dcb_dscp_map;
+};
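Editor's note: link_status carries a link-up bit, a 4-bit speed/duplex code at bits 4:1 and the partner-capability bits above. A minimal decode sketch for the speed code — a local helper for illustration, not part of this patch; u32 is the kernel type used throughout this header:

static u32 link_status_to_mbps(u32 link_status)
{
	if (!(link_status & LINK_STATUS_LINK_UP))
		return 0;

	switch (link_status & LINK_STATUS_SPEED_AND_DUPLEX_MASK) {
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		return 1000;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		return 10000;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		return 20000;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		return 25000;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		return 40000;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		return 50000;
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		return 100000;
	default:
		return 0;	/* reserved encoding */
	}
}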
struct public_func {
- u32 iscsi_boot_signature;
- u32 iscsi_boot_block_offset;
-
- u32 mtu_size;
- u32 c2s_pcp_map_lower;
- u32 c2s_pcp_map_upper;
- u32 c2s_pcp_map_default;
- u32 reserved[4];
-
- u32 config;
-
- /* E/R/I/D */
- /* function 0 of each port cannot be hidden */
-#define FUNC_MF_CFG_FUNC_HIDE 0x00000001
-#define FUNC_MF_CFG_PAUSE_ON_HOST_RING 0x00000002
-#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT 0x00000001
-
-#define FUNC_MF_CFG_PROTOCOL_MASK 0x000000f0
-#define FUNC_MF_CFG_PROTOCOL_SHIFT 4
-#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000
+ u32 reserved0[2];
+
+ u32 mtu_size;
+
+ u32 reserved[7];
+
+ u32 config;
+#define FUNC_MF_CFG_FUNC_HIDE 0x00000001
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING 0x00000002
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT 0x00000001
+
+#define FUNC_MF_CFG_PROTOCOL_MASK 0x000000f0
+#define FUNC_MF_CFG_PROTOCOL_SHIFT 4
+#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000
#define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000010
-#define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000020
#define FUNC_MF_CFG_PROTOCOL_ROCE 0x00000030
-#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000030
+#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000030
- /* MINBW, MAXBW */
- /* value range - 0..100, increments in 1 % */
-#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00
-#define FUNC_MF_CFG_MIN_BW_SHIFT 8
-#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000
-#define FUNC_MF_CFG_MAX_BW_MASK 0x00ff0000
-#define FUNC_MF_CFG_MAX_BW_SHIFT 16
-#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000
+#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00
+#define FUNC_MF_CFG_MIN_BW_SHIFT 8
+#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000
+#define FUNC_MF_CFG_MAX_BW_MASK 0x00ff0000
+#define FUNC_MF_CFG_MAX_BW_SHIFT 16
+#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000
- u32 status;
-#define FUNC_STATUS_VLINK_DOWN 0x00000001
+ u32 status;
+#define FUNC_STATUS_VLINK_DOWN 0x00000001
- u32 mac_upper; /* MAC */
-#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff
-#define FUNC_MF_CFG_UPPERMAC_SHIFT 0
-#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK
- u32 mac_lower;
-#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff
+ u32 mac_upper;
+#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff
+#define FUNC_MF_CFG_UPPERMAC_SHIFT 0
+#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK
+ u32 mac_lower;
+#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff
- u32 fcoe_wwn_port_name_upper;
- u32 fcoe_wwn_port_name_lower;
+ u32 fcoe_wwn_port_name_upper;
+ u32 fcoe_wwn_port_name_lower;
- u32 fcoe_wwn_node_name_upper;
- u32 fcoe_wwn_node_name_lower;
+ u32 fcoe_wwn_node_name_upper;
+ u32 fcoe_wwn_node_name_lower;
- u32 ovlan_stag; /* tags */
-#define FUNC_MF_CFG_OV_STAG_MASK 0x0000ffff
-#define FUNC_MF_CFG_OV_STAG_SHIFT 0
-#define FUNC_MF_CFG_OV_STAG_DEFAULT FUNC_MF_CFG_OV_STAG_MASK
+ u32 ovlan_stag;
+#define FUNC_MF_CFG_OV_STAG_MASK 0x0000ffff
+#define FUNC_MF_CFG_OV_STAG_SHIFT 0
+#define FUNC_MF_CFG_OV_STAG_DEFAULT FUNC_MF_CFG_OV_STAG_MASK
- u32 pf_allocation; /* vf per pf */
+ u32 pf_allocation;
- u32 preserve_data; /* Will be used bt CCM */
+ u32 preserve_data;
- u32 driver_last_activity_ts;
+ u32 driver_last_activity_ts;
- u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32]; /* 0x0044 */
+ u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32];
- u32 drv_id;
-#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff
-#define DRV_ID_PDA_COMP_VER_SHIFT 0
+ u32 drv_id;
+#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff
+#define DRV_ID_PDA_COMP_VER_SHIFT 0
-#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000
-#define DRV_ID_MCP_HSI_VER_SHIFT 16
-#define DRV_ID_MCP_HSI_VER_CURRENT BIT(DRV_ID_MCP_HSI_VER_SHIFT)
+#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000
+#define DRV_ID_MCP_HSI_VER_SHIFT 16
+#define DRV_ID_MCP_HSI_VER_CURRENT (1 << DRV_ID_MCP_HSI_VER_SHIFT)
-#define DRV_ID_DRV_TYPE_MASK 0x7f000000
-#define DRV_ID_DRV_TYPE_SHIFT 24
-#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_LINUX (1 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_WINDOWS (2 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_DIAG (3 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_PREBOOT (4 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_SOLARIS (5 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_VMWARE (6 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_FREEBSD (7 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_AIX (8 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_MASK 0x7f000000
+#define DRV_ID_DRV_TYPE_SHIFT 24
+#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_LINUX (1 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_INIT_HW_MASK 0x80000000
-#define DRV_ID_DRV_INIT_HW_SHIFT 31
-#define DRV_ID_DRV_INIT_HW_FLAG BIT(DRV_ID_DRV_INIT_HW_SHIFT)
+#define DRV_ID_DRV_INIT_HW_MASK 0x80000000
+#define DRV_ID_DRV_INIT_HW_SHIFT 31
+#define DRV_ID_DRV_INIT_HW_FLAG (1 << DRV_ID_DRV_INIT_HW_SHIFT)
};
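Editor's note: drv_id packs the PDA compatibility version (bits 15:0), the MCP HSI version (bits 23:16), the driver type (bits 30:24) and the init-HW flag (bit 31). An illustrative composition for a Linux driver that initializes the hardware — the PDA value is invented for the example:

/*   drv_id = (0x1234 & DRV_ID_PDA_COMP_VER_MASK)   assumed PDA version
 *          | DRV_ID_MCP_HSI_VER_CURRENT            HSI major == 1
 *          | DRV_ID_DRV_TYPE_LINUX                 type 1 at bit 24
 *          | DRV_ID_DRV_INIT_HW_FLAG               driver inits HW
 *          == 0x81011234
 */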
-/**************************************/
-/* */
-/* P U B L I C M B */
-/* */
-/**************************************/
-/* This is the only section that the driver can write to, and each */
-/* Basically each driver request to set feature parameters,
- * will be done using a different command, which will be linked
- * to a specific data structure from the union below.
- * For huge strucuture, the common blank structure should be used.
- */
-
struct mcp_mac {
- u32 mac_upper; /* Upper 16 bits are always zeroes */
- u32 mac_lower;
+ u32 mac_upper;
+ u32 mac_lower;
};
struct mcp_val64 {
- u32 lo;
- u32 hi;
+ u32 lo;
+ u32 hi;
};
struct mcp_file_att {
- u32 nvm_start_addr;
- u32 len;
+ u32 nvm_start_addr;
+ u32 len;
+};
+
+struct bist_nvm_image_att {
+ u32 return_code;
+ u32 image_type;
+ u32 nvm_start_addr;
+ u32 len;
};
#define MCP_DRV_VER_STR_SIZE 16
#define MCP_DRV_VER_STR_SIZE_DWORD (MCP_DRV_VER_STR_SIZE / sizeof(u32))
#define MCP_DRV_NVM_BUF_LEN 32
struct drv_version_stc {
- u32 version;
- u8 name[MCP_DRV_VER_STR_SIZE - 4];
+ u32 version;
+ u8 name[MCP_DRV_VER_STR_SIZE - 4];
+};
+
+struct lan_stats_stc {
+ u64 ucast_rx_pkts;
+ u64 ucast_tx_pkts;
+ u32 fcs_err;
+ u32 rserved;
+};
+
+struct ocbb_data_stc {
+ u32 ocbb_host_addr;
+ u32 ocsd_host_addr;
+ u32 ocsd_req_update_interval;
+};
+
+#define MAX_NUM_OF_SENSORS 7
+struct temperature_status_stc {
+ u32 num_of_sensors;
+ u32 sensor[MAX_NUM_OF_SENSORS];
+};
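
A short illustrative loop (not from the patch) consuming the sensor array,
clamping to MAX_NUM_OF_SENSORS in case the firmware reports more entries
than the structure can hold:

	static void print_sensors(const struct temperature_status_stc *t)
	{
		u32 i, n = min_t(u32, t->num_of_sensors, MAX_NUM_OF_SENSORS);

		for (i = 0; i < n; i++)
			pr_info("sensor[%u] = %u\n", i, t->sensor[i]);
	}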
+
+/* crash dump configuration header */
+struct mdump_config_stc {
+ u32 version;
+ u32 config;
+ u32 epoc;
+ u32 num_of_logs;
+ u32 valid_logs;
};
union drv_union_data {
- u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD];
- struct mcp_mac wol_mac;
+ u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD];
+ struct mcp_mac wol_mac;
- struct pmm_phy_cfg drv_phy_cfg;
+ struct eth_phy_cfg drv_phy_cfg;
- struct mcp_val64 val64; /* For PHY / AVS commands */
+ struct mcp_val64 val64;
- u8 raw_data[MCP_DRV_NVM_BUF_LEN];
+ u8 raw_data[MCP_DRV_NVM_BUF_LEN];
- struct mcp_file_att file_att;
+ struct mcp_file_att file_att;
- u32 ack_vf_disabled[VF_MAX_STATIC / 32];
+ u32 ack_vf_disabled[VF_MAX_STATIC / 32];
- struct drv_version_stc drv_version;
+ struct drv_version_stc drv_version;
+
+ struct lan_stats_stc lan_stats;
+ u64 reserved_stats[11];
+ struct ocbb_data_stc ocbb_info;
+ struct temperature_status_stc temp_info;
+ struct bist_nvm_image_att nvm_image_att;
+ struct mdump_config_stc mdump_config;
};
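
Each mailbox command interprets union_data through exactly one member. A
hedged sketch of staging a SET_VERSION payload (copying it into the
shared-memory window and the locking around it are omitted; the name and
version values are illustrative):

	static void stage_set_version(union drv_union_data *data, u32 ver)
	{
		memset(data, 0, sizeof(*data));
		data->drv_version.version = ver;	/* assumed caller value */
		strlcpy(data->drv_version.name, "drv",
			sizeof(data->drv_version.name));
	}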
struct public_drv_mb {
u32 drv_mb_header;
-#define DRV_MSG_CODE_MASK 0xffff0000
-#define DRV_MSG_CODE_LOAD_REQ 0x10000000
-#define DRV_MSG_CODE_LOAD_DONE 0x11000000
-#define DRV_MSG_CODE_INIT_HW 0x12000000
-#define DRV_MSG_CODE_UNLOAD_REQ 0x20000000
-#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
-#define DRV_MSG_CODE_INIT_PHY 0x22000000
- /* Params - FORCE - Reinitialize the link regardless of LFA */
- /* - DONT_CARE - Don't flap the link if up */
-#define DRV_MSG_CODE_LINK_RESET 0x23000000
-
-#define DRV_MSG_CODE_SET_LLDP 0x24000000
-#define DRV_MSG_CODE_SET_DCBX 0x25000000
-
-#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
-
-#define DRV_MSG_CODE_INITIATE_FLR 0x02000000
-#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
-#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
-#define DRV_MSG_CODE_NVM_PUT_FILE_BEGIN 0x00010000
-#define DRV_MSG_CODE_NVM_PUT_FILE_DATA 0x00020000
-#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000
-#define DRV_MSG_CODE_NVM_READ_NVRAM 0x00050000
-#define DRV_MSG_CODE_NVM_WRITE_NVRAM 0x00060000
-#define DRV_MSG_CODE_NVM_DEL_FILE 0x00080000
-#define DRV_MSG_CODE_MCP_RESET 0x00090000
-#define DRV_MSG_CODE_SET_SECURE_MODE 0x000a0000
-#define DRV_MSG_CODE_PHY_RAW_READ 0x000b0000
-#define DRV_MSG_CODE_PHY_RAW_WRITE 0x000c0000
-#define DRV_MSG_CODE_PHY_CORE_READ 0x000d0000
-#define DRV_MSG_CODE_PHY_CORE_WRITE 0x000e0000
-#define DRV_MSG_CODE_SET_VERSION 0x000f0000
-
-#define DRV_MSG_CODE_SET_LED_MODE 0x00200000
-
-#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
+#define DRV_MSG_CODE_MASK 0xffff0000
+#define DRV_MSG_CODE_LOAD_REQ 0x10000000
+#define DRV_MSG_CODE_LOAD_DONE 0x11000000
+#define DRV_MSG_CODE_INIT_HW 0x12000000
+#define DRV_MSG_CODE_UNLOAD_REQ 0x20000000
+#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
+#define DRV_MSG_CODE_INIT_PHY 0x22000000
+#define DRV_MSG_CODE_LINK_RESET 0x23000000
+#define DRV_MSG_CODE_SET_DCBX 0x25000000
+
+#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
+#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
+#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
+#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
+#define DRV_MSG_CODE_MCP_RESET 0x00090000
+#define DRV_MSG_CODE_SET_VERSION 0x000f0000
+
+#define DRV_MSG_CODE_BIST_TEST 0x001e0000
+#define DRV_MSG_CODE_SET_LED_MODE 0x00200000
+
+#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
u32 drv_mb_param;
+#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001
+#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x000000FF
+#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00
+#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001
+#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0
+
+#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0
+#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
+#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2
+
+#define DRV_MB_PARAM_BIST_REGISTER_TEST 1
+#define DRV_MB_PARAM_BIST_CLOCK_TEST 2
+
+#define DRV_MB_PARAM_BIST_RC_UNKNOWN 0
+#define DRV_MB_PARAM_BIST_RC_PASSED 1
+#define DRV_MB_PARAM_BIST_RC_FAILED 2
+#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER 3
- /* UNLOAD_REQ params */
-#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN 0x00000000
-#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001
-#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED 0x00000002
-#define DRV_MB_PARAM_UNLOAD_WOL_ENABLED 0x00000003
-
- /* UNLOAD_DONE_params */
-#define DRV_MB_PARAM_UNLOAD_NON_D3_POWER 0x00000001
-
- /* INIT_PHY params */
-#define DRV_MB_PARAM_INIT_PHY_FORCE 0x00000001
-#define DRV_MB_PARAM_INIT_PHY_DONT_CARE 0x00000002
-
- /* LLDP / DCBX params*/
-#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001
-#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0
-#define DRV_MB_PARAM_LLDP_AGENT_MASK 0x00000006
-#define DRV_MB_PARAM_LLDP_AGENT_SHIFT 1
-#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x00000008
-#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3
-
-#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_MASK 0x000000FF
-#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_SHIFT 0
-
-#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MFW 0x1
-#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_IMAGE 0x2
-
-#define DRV_MB_PARAM_NVM_OFFSET_SHIFT 0
-#define DRV_MB_PARAM_NVM_OFFSET_MASK 0x00FFFFFF
-#define DRV_MB_PARAM_NVM_LEN_SHIFT 24
-#define DRV_MB_PARAM_NVM_LEN_MASK 0xFF000000
-
-#define DRV_MB_PARAM_PHY_ADDR_SHIFT 0
-#define DRV_MB_PARAM_PHY_ADDR_MASK 0x1FF0FFFF
-#define DRV_MB_PARAM_PHY_LANE_SHIFT 16
-#define DRV_MB_PARAM_PHY_LANE_MASK 0x000F0000
-#define DRV_MB_PARAM_PHY_SELECT_PORT_SHIFT 29
-#define DRV_MB_PARAM_PHY_SELECT_PORT_MASK 0x20000000
-#define DRV_MB_PARAM_PHY_PORT_SHIFT 30
-#define DRV_MB_PARAM_PHY_PORT_MASK 0xc0000000
-
-/* configure vf MSIX params*/
-#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0
-#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF
-#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8
-#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00
-
-#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0
-#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
-#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2
+#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0
+#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000FF
u32 fw_mb_header;
-#define FW_MSG_CODE_MASK 0xffff0000
-#define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000
-#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
-#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA 0x10200000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10210000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG 0x10220000
-#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
-#define FW_MSG_CODE_DRV_UNLOAD_ENGINE 0x20110000
-#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000
-#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20130000
-#define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000
-#define FW_MSG_CODE_INIT_PHY_DONE 0x21200000
-#define FW_MSG_CODE_INIT_PHY_ERR_INVALID_ARGS 0x21300000
-#define FW_MSG_CODE_LINK_RESET_DONE 0x23000000
-#define FW_MSG_CODE_SET_LLDP_DONE 0x24000000
-#define FW_MSG_CODE_SET_LLDP_UNSUPPORTED_AGENT 0x24010000
-#define FW_MSG_CODE_SET_DCBX_DONE 0x25000000
-#define FW_MSG_CODE_NIG_DRAIN_DONE 0x30000000
-#define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000
-#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000
-#define FW_MSG_CODE_FLR_ACK 0x02000000
-#define FW_MSG_CODE_FLR_NACK 0x02100000
-
-#define FW_MSG_CODE_NVM_OK 0x00010000
-#define FW_MSG_CODE_NVM_INVALID_MODE 0x00020000
-#define FW_MSG_CODE_NVM_PREV_CMD_WAS_NOT_FINISHED 0x00030000
-#define FW_MSG_CODE_NVM_FAILED_TO_ALLOCATE_PAGE 0x00040000
-#define FW_MSG_CODE_NVM_INVALID_DIR_FOUND 0x00050000
-#define FW_MSG_CODE_NVM_PAGE_NOT_FOUND 0x00060000
-#define FW_MSG_CODE_NVM_FAILED_PARSING_BNDLE_HEADER 0x00070000
-#define FW_MSG_CODE_NVM_FAILED_PARSING_IMAGE_HEADER 0x00080000
-#define FW_MSG_CODE_NVM_PARSING_OUT_OF_SYNC 0x00090000
-#define FW_MSG_CODE_NVM_FAILED_UPDATING_DIR 0x000a0000
-#define FW_MSG_CODE_NVM_FAILED_TO_FREE_PAGE 0x000b0000
-#define FW_MSG_CODE_NVM_FILE_NOT_FOUND 0x000c0000
-#define FW_MSG_CODE_NVM_OPERATION_FAILED 0x000d0000
-#define FW_MSG_CODE_NVM_FAILED_UNALIGNED 0x000e0000
-#define FW_MSG_CODE_NVM_BAD_OFFSET 0x000f0000
-#define FW_MSG_CODE_NVM_BAD_SIGNATURE 0x00100000
-#define FW_MSG_CODE_NVM_FILE_READ_ONLY 0x00200000
-#define FW_MSG_CODE_NVM_UNKNOWN_FILE 0x00300000
-#define FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK 0x00400000
-#define FW_MSG_CODE_MCP_RESET_REJECT 0x00600000
-#define FW_MSG_CODE_PHY_OK 0x00110000
-#define FW_MSG_CODE_PHY_ERROR 0x00120000
-#define FW_MSG_CODE_SET_SECURE_MODE_ERROR 0x00130000
-#define FW_MSG_CODE_SET_SECURE_MODE_OK 0x00140000
-#define FW_MSG_MODE_PHY_PRIVILEGE_ERROR 0x00150000
-#define FW_MSG_CODE_OK 0x00160000
-
-#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
-
- u32 fw_mb_param;
-
- u32 drv_pulse_mb;
-#define DRV_PULSE_SEQ_MASK 0x00007fff
-#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
-#define DRV_PULSE_ALWAYS_ALIVE 0x00008000
+#define FW_MSG_CODE_MASK 0xffff0000
+#define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000
+#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
+#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA 0x10200000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10210000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG 0x10220000
+#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
+#define FW_MSG_CODE_DRV_UNLOAD_ENGINE 0x20110000
+#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000
+#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20130000
+#define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000
+#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000
+#define FW_MSG_CODE_OK 0x00160000
+
+#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
+
+ u32 fw_mb_param;
+
+ u32 drv_pulse_mb;
+#define DRV_PULSE_SEQ_MASK 0x00007fff
+#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
+#define DRV_PULSE_ALWAYS_ALIVE 0x00008000
+
u32 mcp_pulse_mb;
-#define MCP_PULSE_SEQ_MASK 0x00007fff
-#define MCP_PULSE_ALWAYS_ALIVE 0x00008000
-#define MCP_EVENT_MASK 0xffff0000
-#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000
+#define MCP_PULSE_SEQ_MASK 0x00007fff
+#define MCP_PULSE_ALWAYS_ALIVE 0x00008000
+#define MCP_EVENT_MASK 0xffff0000
+#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000
union drv_union_data union_data;
};
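
The drv_mb/fw_mb pair implements a simple request/acknowledge protocol:
the driver writes drv_mb_param, writes the command OR'ed with a sequence
number into drv_mb_header, then polls fw_mb_header until the MFW echoes
that sequence back. A minimal sketch, assuming the mailbox is
memory-mapped and ignoring the timeouts and locking the real MCP command
path needs (BIST is used as an arbitrary example command):

	static u32 mcp_send(struct public_drv_mb __iomem *mb, u32 cmd,
			    u32 param, u16 seq)
	{
		writel(param, &mb->drv_mb_param);
		writel(cmd | seq, &mb->drv_mb_header);

		while ((readl(&mb->fw_mb_header) &
			FW_MSG_SEQ_NUMBER_MASK) != seq)
			cpu_relax();	/* sketch only: no timeout */

		return readl(&mb->fw_mb_header) & FW_MSG_CODE_MASK;
	}

	/* e.g. mcp_send(mb, DRV_MSG_CODE_BIST_TEST,
	 *		 DRV_MB_PARAM_BIST_REGISTER_TEST, seq);
	 */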
-/* MFW - DRV MB */
-/**********************************************************************
-* Description
-*   Incremental aggregative counters:
-*   8-bit MFW counter per message
-*   8-bit ack-counter per message
-* Capabilities
-*   Provides up to 256 aggregative messages per type
-*   Provides 4 message types per dword
-*   Message type pointers to byte offset
-*   Backward compatibility by using sizeof for the counters.
-*   No lock required for 32-bit messages
-* Limitations:
-* For messages greater than 32 bits, a dedicated mechanism (e.g. a lock)
-* is required to prevent data corruption.
-**********************************************************************/
enum MFW_DRV_MSG_TYPE {
MFW_DRV_MSG_LINK_CHANGE,
MFW_DRV_MSG_FLR_FW_ACK_FAILED,
@@ -3957,37 +7312,33 @@ enum MFW_DRV_MSG_TYPE {
MFW_DRV_MSG_LLDP_DATA_UPDATED,
MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED,
MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED,
- MFW_DRV_MSG_ERROR_RECOVERY,
+ MFW_DRV_MSG_RESERVED4,
MFW_DRV_MSG_BW_UPDATE,
- MFW_DRV_MSG_S_TAG_UPDATE,
- MFW_DRV_MSG_GET_LAN_STATS,
- MFW_DRV_MSG_GET_FCOE_STATS,
- MFW_DRV_MSG_GET_ISCSI_STATS,
- MFW_DRV_MSG_GET_RDMA_STATS,
- MFW_DRV_MSG_FAILURE_DETECTED,
+ MFW_DRV_MSG_BW_UPDATE5,
+ MFW_DRV_MSG_BW_UPDATE6,
+ MFW_DRV_MSG_BW_UPDATE7,
+ MFW_DRV_MSG_BW_UPDATE8,
+ MFW_DRV_MSG_BW_UPDATE9,
+ MFW_DRV_MSG_BW_UPDATE10,
MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
+ MFW_DRV_MSG_BW_UPDATE11,
MFW_DRV_MSG_MAX
};
-#define MFW_DRV_MSG_MAX_DWORDS(msgs) (((msgs - 1) >> 2) + 1)
-#define MFW_DRV_MSG_DWORD(msg_id) (msg_id >> 2)
-#define MFW_DRV_MSG_OFFSET(msg_id) ((msg_id & 0x3) << 3)
-#define MFW_DRV_MSG_MASK(msg_id) (0xff << MFW_DRV_MSG_OFFSET(msg_id))
+#define MFW_DRV_MSG_MAX_DWORDS(msgs) (((msgs - 1) >> 2) + 1)
+#define MFW_DRV_MSG_DWORD(msg_id) (msg_id >> 2)
+#define MFW_DRV_MSG_OFFSET(msg_id) ((msg_id & 0x3) << 3)
+#define MFW_DRV_MSG_MASK(msg_id) (0xff << MFW_DRV_MSG_OFFSET(msg_id))
struct public_mfw_mb {
- u32 sup_msgs;
- u32 msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
- u32 ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
+ u32 sup_msgs;
+ u32 msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
+ u32 ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
};
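
With the DWORD/OFFSET/MASK helpers above, a message is outstanding while
its 8-bit counter in msg[] differs from the driver's ack[] shadow; a
sketch (illustrative, not part of the patch):

	static bool mfw_msg_pending(const struct public_mfw_mb *mb,
				    enum MFW_DRV_MSG_TYPE id)
	{
		u32 diff = mb->msg[MFW_DRV_MSG_DWORD(id)] ^
			   mb->ack[MFW_DRV_MSG_DWORD(id)];

		return diff & MFW_DRV_MSG_MASK(id);
	}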
-/**************************************/
-/* */
-/* P U B L I C D A T A */
-/* */
-/**************************************/
enum public_sections {
- PUBLIC_DRV_MB, /* Points to the first drv_mb of path0 */
- PUBLIC_MFW_MB, /* Points to the first mfw_mb of path0 */
+ PUBLIC_DRV_MB,
+ PUBLIC_MFW_MB,
PUBLIC_GLOBAL,
PUBLIC_PATH,
PUBLIC_PORT,
@@ -3995,1076 +7346,179 @@ enum public_sections {
PUBLIC_MAX_SECTIONS
};
-struct drv_ver_info_stc {
- u32 ver;
- u8 name[32];
-};
-
struct mcp_public_data {
- /* The sections fields is an array */
- u32 num_sections;
- offsize_t sections[PUBLIC_MAX_SECTIONS];
- struct public_drv_mb drv_mb[MCP_GLOB_FUNC_MAX];
- struct public_mfw_mb mfw_mb[MCP_GLOB_FUNC_MAX];
- struct public_global global;
- struct public_path path[MCP_GLOB_PATH_MAX];
- struct public_port port[MCP_GLOB_PORT_MAX];
- struct public_func func[MCP_GLOB_FUNC_MAX];
- struct drv_ver_info_stc drv_info;
+ u32 num_sections;
+ u32 sections[PUBLIC_MAX_SECTIONS];
+ struct public_drv_mb drv_mb[MCP_GLOB_FUNC_MAX];
+ struct public_mfw_mb mfw_mb[MCP_GLOB_FUNC_MAX];
+ struct public_global global;
+ struct public_path path[MCP_GLOB_PATH_MAX];
+ struct public_port port[MCP_GLOB_PORT_MAX];
+ struct public_func func[MCP_GLOB_FUNC_MAX];
};
struct nvm_cfg_mac_address {
- u32 mac_addr_hi;
-#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000FFFF
-#define NVM_CFG_MAC_ADDRESS_HI_OFFSET 0
-
- u32 mac_addr_lo;
+ u32 mac_addr_hi;
+#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000FFFF
+#define NVM_CFG_MAC_ADDRESS_HI_OFFSET 0
+ u32 mac_addr_lo;
};
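
mac_addr_hi carries the top two octets of the MAC in its low 16 bits and
mac_addr_lo the remaining four; a hedged sketch of unpacking the pair
into a byte array:

	static void nvm_mac_to_bytes(const struct nvm_cfg_mac_address *a,
				     u8 mac[6])
	{
		mac[0] = (u8)(a->mac_addr_hi >> 8);
		mac[1] = (u8)a->mac_addr_hi;
		mac[2] = (u8)(a->mac_addr_lo >> 24);
		mac[3] = (u8)(a->mac_addr_lo >> 16);
		mac[4] = (u8)(a->mac_addr_lo >> 8);
		mac[5] = (u8)a->mac_addr_lo;
	}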
-/******************************************
-* nvm_cfg1 structs
-******************************************/
-
struct nvm_cfg1_glob {
- u32 generic_cont0; /* 0x0 */
-#define NVM_CFG1_GLOB_BOARD_SWAP_MASK 0x0000000F
-#define NVM_CFG1_GLOB_BOARD_SWAP_OFFSET 0
-#define NVM_CFG1_GLOB_BOARD_SWAP_NONE 0x0
-#define NVM_CFG1_GLOB_BOARD_SWAP_PATH 0x1
-#define NVM_CFG1_GLOB_BOARD_SWAP_PORT 0x2
-#define NVM_CFG1_GLOB_BOARD_SWAP_BOTH 0x3
-#define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000FF0
-#define NVM_CFG1_GLOB_MF_MODE_OFFSET 4
-#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0
-#define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1
-#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2
-#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3
-#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4
-#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5
-#define NVM_CFG1_GLOB_MF_MODE_BD 0x6
-#define NVM_CFG1_GLOB_MF_MODE_UFP 0x7
-#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_MASK 0x00001000
-#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_OFFSET 12
-#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_DISABLED 0x0
-#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_ENABLED 0x1
-#define NVM_CFG1_GLOB_AVS_MARGIN_LOW_MASK 0x001FE000
-#define NVM_CFG1_GLOB_AVS_MARGIN_LOW_OFFSET 13
-#define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_MASK 0x1FE00000
-#define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_OFFSET 21
-#define NVM_CFG1_GLOB_ENABLE_SRIOV_MASK 0x20000000
-#define NVM_CFG1_GLOB_ENABLE_SRIOV_OFFSET 29
-#define NVM_CFG1_GLOB_ENABLE_SRIOV_DISABLED 0x0
-#define NVM_CFG1_GLOB_ENABLE_SRIOV_ENABLED 0x1
-#define NVM_CFG1_GLOB_ENABLE_ATC_MASK 0x40000000
-#define NVM_CFG1_GLOB_ENABLE_ATC_OFFSET 30
-#define NVM_CFG1_GLOB_ENABLE_ATC_DISABLED 0x0
-#define NVM_CFG1_GLOB_ENABLE_ATC_ENABLED 0x1
-#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_MASK 0x80000000
-#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_OFFSET 31
-#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_DISABLED 0x0
-#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_ENABLED 0x1
-
- u32 engineering_change[3]; /* 0x4 */
-
- u32 manufacturing_id; /* 0x10 */
-
- u32 serial_number[4]; /* 0x14 */
-
- u32 pcie_cfg; /* 0x24 */
-#define NVM_CFG1_GLOB_PCI_GEN_MASK 0x00000003
-#define NVM_CFG1_GLOB_PCI_GEN_OFFSET 0
-#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN1 0x0
-#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN2 0x1
-#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN3 0x2
-#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_MASK 0x00000004
-#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_OFFSET 2
-#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_DISABLED 0x0
-#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_ENABLED 0x1
-#define NVM_CFG1_GLOB_ASPM_SUPPORT_MASK 0x00000018
-#define NVM_CFG1_GLOB_ASPM_SUPPORT_OFFSET 3
-#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_L1_ENABLED 0x0
-#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_DISABLED 0x1
-#define NVM_CFG1_GLOB_ASPM_SUPPORT_L1_DISABLED 0x2
-#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_L1_DISABLED 0x3
-#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_MASK 0x00000020
-#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_OFFSET 5
-#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_DISABLED 0x0
-#define NVM_CFG1_GLOB_PREVENT_PCIE_L1_MENTRY_ENABLED 0x1
-#define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_MASK 0x000003C0
-#define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_OFFSET 6
-#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_MASK 0x00001C00
-#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_OFFSET 10
-#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_HW 0x0
-#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_0DB 0x1
-#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_3_5DB 0x2
-#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_6_0DB 0x3
-#define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_MASK 0x001FE000
-#define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_OFFSET 13
-#define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_MASK 0x1FE00000
-#define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_OFFSET 21
-#define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_MASK 0x60000000
-#define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_OFFSET 29
-
- u32 mgmt_traffic; /* 0x28 */
-#define NVM_CFG1_GLOB_RESERVED60_MASK 0x00000001
-#define NVM_CFG1_GLOB_RESERVED60_OFFSET 0
-#define NVM_CFG1_GLOB_RESERVED60_100KHZ 0x0
-#define NVM_CFG1_GLOB_RESERVED60_400KHZ 0x1
-#define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_MASK 0x000001FE
-#define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_OFFSET 1
-#define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_MASK 0x0001FE00
-#define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_OFFSET 9
-#define NVM_CFG1_GLOB_SMBUS_ADDRESS_MASK 0x01FE0000
-#define NVM_CFG1_GLOB_SMBUS_ADDRESS_OFFSET 17
-#define NVM_CFG1_GLOB_SIDEBAND_MODE_MASK 0x06000000
-#define NVM_CFG1_GLOB_SIDEBAND_MODE_OFFSET 25
-#define NVM_CFG1_GLOB_SIDEBAND_MODE_DISABLED 0x0
-#define NVM_CFG1_GLOB_SIDEBAND_MODE_RMII 0x1
-#define NVM_CFG1_GLOB_SIDEBAND_MODE_SGMII 0x2
-
- u32 core_cfg; /* 0x2C */
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000FF
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G 0x0
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G 0x1
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G 0x2
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F 0x3
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E 0x4
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G 0x5
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G 0xB
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G 0xC
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G 0xD
-#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_MASK 0x00000100
-#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_OFFSET 8
-#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_DISABLED 0x0
-#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_ENABLED 0x1
-#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_MASK 0x00000200
-#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_OFFSET 9
-#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_DISABLED 0x0
-#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_ENABLED 0x1
-#define NVM_CFG1_GLOB_EAGLE_CORE_ADDR_MASK 0x0003FC00
-#define NVM_CFG1_GLOB_EAGLE_CORE_ADDR_OFFSET 10
-#define NVM_CFG1_GLOB_FALCON_CORE_ADDR_MASK 0x03FC0000
-#define NVM_CFG1_GLOB_FALCON_CORE_ADDR_OFFSET 18
-#define NVM_CFG1_GLOB_AVS_MODE_MASK 0x1C000000
-#define NVM_CFG1_GLOB_AVS_MODE_OFFSET 26
-#define NVM_CFG1_GLOB_AVS_MODE_CLOSE_LOOP 0x0
-#define NVM_CFG1_GLOB_AVS_MODE_OPEN_LOOP 0x1
-#define NVM_CFG1_GLOB_AVS_MODE_DISABLED 0x3
-#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_MASK 0x60000000
-#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_OFFSET 29
-#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_DISABLED 0x0
-#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_ENABLED 0x1
-
- u32 e_lane_cfg1; /* 0x30 */
-#define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F
-#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0
-#define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK 0x000000F0
-#define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET 4
-#define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK 0x00000F00
-#define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET 8
-#define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK 0x0000F000
-#define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET 12
-#define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK 0x000F0000
-#define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET 16
-#define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK 0x00F00000
-#define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET 20
-#define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK 0x0F000000
-#define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET 24
-#define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK 0xF0000000
-#define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET 28
-
- u32 e_lane_cfg2; /* 0x34 */
-#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK 0x00000001
-#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET 0
-#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK 0x00000002
-#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET 1
-#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK 0x00000004
-#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET 2
-#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK 0x00000008
-#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET 3
-#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK 0x00000010
-#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET 4
-#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK 0x00000020
-#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET 5
-#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK 0x00000040
-#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET 6
-#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK 0x00000080
-#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET 7
-#define NVM_CFG1_GLOB_SMBUS_MODE_MASK 0x00000F00
-#define NVM_CFG1_GLOB_SMBUS_MODE_OFFSET 8
-#define NVM_CFG1_GLOB_SMBUS_MODE_DISABLED 0x0
-#define NVM_CFG1_GLOB_SMBUS_MODE_100KHZ 0x1
-#define NVM_CFG1_GLOB_SMBUS_MODE_400KHZ 0x2
-#define NVM_CFG1_GLOB_NCSI_MASK 0x0000F000
-#define NVM_CFG1_GLOB_NCSI_OFFSET 12
-#define NVM_CFG1_GLOB_NCSI_DISABLED 0x0
-#define NVM_CFG1_GLOB_NCSI_ENABLED 0x1
-
- u32 f_lane_cfg1; /* 0x38 */
-#define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F
-#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0
-#define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK 0x000000F0
-#define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET 4
-#define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK 0x00000F00
-#define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET 8
-#define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK 0x0000F000
-#define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET 12
-#define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK 0x000F0000
-#define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET 16
-#define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK 0x00F00000
-#define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET 20
-#define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK 0x0F000000
-#define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET 24
-#define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK 0xF0000000
-#define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET 28
-
- u32 f_lane_cfg2; /* 0x3C */
-#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK 0x00000001
-#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET 0
-#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK 0x00000002
-#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET 1
-#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK 0x00000004
-#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET 2
-#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK 0x00000008
-#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET 3
-#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK 0x00000010
-#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET 4
-#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK 0x00000020
-#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET 5
-#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK 0x00000040
-#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET 6
-#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK 0x00000080
-#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET 7
-
- u32 eagle_preemphasis; /* 0x40 */
-#define NVM_CFG1_GLOB_LANE0_PREEMP_MASK 0x000000FF
-#define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET 0
-#define NVM_CFG1_GLOB_LANE1_PREEMP_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET 8
-#define NVM_CFG1_GLOB_LANE2_PREEMP_MASK 0x00FF0000
-#define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET 16
-#define NVM_CFG1_GLOB_LANE3_PREEMP_MASK 0xFF000000
-#define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET 24
-
- u32 eagle_driver_current; /* 0x44 */
-#define NVM_CFG1_GLOB_LANE0_AMP_MASK 0x000000FF
-#define NVM_CFG1_GLOB_LANE0_AMP_OFFSET 0
-#define NVM_CFG1_GLOB_LANE1_AMP_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_LANE1_AMP_OFFSET 8
-#define NVM_CFG1_GLOB_LANE2_AMP_MASK 0x00FF0000
-#define NVM_CFG1_GLOB_LANE2_AMP_OFFSET 16
-#define NVM_CFG1_GLOB_LANE3_AMP_MASK 0xFF000000
-#define NVM_CFG1_GLOB_LANE3_AMP_OFFSET 24
-
- u32 falcon_preemphasis; /* 0x48 */
-#define NVM_CFG1_GLOB_LANE0_PREEMP_MASK 0x000000FF
-#define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET 0
-#define NVM_CFG1_GLOB_LANE1_PREEMP_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET 8
-#define NVM_CFG1_GLOB_LANE2_PREEMP_MASK 0x00FF0000
-#define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET 16
-#define NVM_CFG1_GLOB_LANE3_PREEMP_MASK 0xFF000000
-#define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET 24
-
- u32 falcon_driver_current; /* 0x4C */
-#define NVM_CFG1_GLOB_LANE0_AMP_MASK 0x000000FF
-#define NVM_CFG1_GLOB_LANE0_AMP_OFFSET 0
-#define NVM_CFG1_GLOB_LANE1_AMP_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_LANE1_AMP_OFFSET 8
-#define NVM_CFG1_GLOB_LANE2_AMP_MASK 0x00FF0000
-#define NVM_CFG1_GLOB_LANE2_AMP_OFFSET 16
-#define NVM_CFG1_GLOB_LANE3_AMP_MASK 0xFF000000
-#define NVM_CFG1_GLOB_LANE3_AMP_OFFSET 24
-
- u32 pci_id; /* 0x50 */
-#define NVM_CFG1_GLOB_VENDOR_ID_MASK 0x0000FFFF
-#define NVM_CFG1_GLOB_VENDOR_ID_OFFSET 0
-
- u32 pci_subsys_id; /* 0x54 */
-#define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFF
-#define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_OFFSET 0
-#define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_MASK 0xFFFF0000
-#define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_OFFSET 16
-
- u32 bar; /* 0x58 */
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_MASK 0x0000000F
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_OFFSET 0
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_DISABLED 0x0
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2K 0x1
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4K 0x2
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8K 0x3
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16K 0x4
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32K 0x5
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_64K 0x6
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_128K 0x7
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_256K 0x8
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_512K 0x9
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_1M 0xA
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2M 0xB
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4M 0xC
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8M 0xD
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16M 0xE
-#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32M 0xF
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_MASK 0x000000F0
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_OFFSET 4
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_DISABLED 0x0
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4K 0x1
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8K 0x2
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16K 0x3
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32K 0x4
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64K 0x5
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_128K 0x6
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_256K 0x7
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_512K 0x8
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_1M 0x9
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_2M 0xA
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4M 0xB
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8M 0xC
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16M 0xD
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32M 0xE
-#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64M 0xF
-#define NVM_CFG1_GLOB_BAR2_SIZE_MASK 0x00000F00
-#define NVM_CFG1_GLOB_BAR2_SIZE_OFFSET 8
-#define NVM_CFG1_GLOB_BAR2_SIZE_DISABLED 0x0
-#define NVM_CFG1_GLOB_BAR2_SIZE_64K 0x1
-#define NVM_CFG1_GLOB_BAR2_SIZE_128K 0x2
-#define NVM_CFG1_GLOB_BAR2_SIZE_256K 0x3
-#define NVM_CFG1_GLOB_BAR2_SIZE_512K 0x4
-#define NVM_CFG1_GLOB_BAR2_SIZE_1M 0x5
-#define NVM_CFG1_GLOB_BAR2_SIZE_2M 0x6
-#define NVM_CFG1_GLOB_BAR2_SIZE_4M 0x7
-#define NVM_CFG1_GLOB_BAR2_SIZE_8M 0x8
-#define NVM_CFG1_GLOB_BAR2_SIZE_16M 0x9
-#define NVM_CFG1_GLOB_BAR2_SIZE_32M 0xA
-#define NVM_CFG1_GLOB_BAR2_SIZE_64M 0xB
-#define NVM_CFG1_GLOB_BAR2_SIZE_128M 0xC
-#define NVM_CFG1_GLOB_BAR2_SIZE_256M 0xD
-#define NVM_CFG1_GLOB_BAR2_SIZE_512M 0xE
-#define NVM_CFG1_GLOB_BAR2_SIZE_1G 0xF
-
- u32 eagle_txfir_main; /* 0x5C */
-#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK 0x000000FF
-#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET 0
-#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET 8
-#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK 0x00FF0000
-#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET 16
-#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK 0xFF000000
-#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET 24
-
- u32 eagle_txfir_post; /* 0x60 */
-#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK 0x000000FF
-#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET 0
-#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET 8
-#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK 0x00FF0000
-#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET 16
-#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK 0xFF000000
-#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET 24
-
- u32 falcon_txfir_main; /* 0x64 */
-#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK 0x000000FF
-#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET 0
-#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET 8
-#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK 0x00FF0000
-#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET 16
-#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK 0xFF000000
-#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET 24
-
- u32 falcon_txfir_post; /* 0x68 */
-#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK 0x000000FF
-#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET 0
-#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET 8
-#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK 0x00FF0000
-#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET 16
-#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK 0xFF000000
-#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET 24
-
- u32 manufacture_ver; /* 0x6C */
-#define NVM_CFG1_GLOB_MANUF0_VER_MASK 0x0000003F
-#define NVM_CFG1_GLOB_MANUF0_VER_OFFSET 0
-#define NVM_CFG1_GLOB_MANUF1_VER_MASK 0x00000FC0
-#define NVM_CFG1_GLOB_MANUF1_VER_OFFSET 6
-#define NVM_CFG1_GLOB_MANUF2_VER_MASK 0x0003F000
-#define NVM_CFG1_GLOB_MANUF2_VER_OFFSET 12
-#define NVM_CFG1_GLOB_MANUF3_VER_MASK 0x00FC0000
-#define NVM_CFG1_GLOB_MANUF3_VER_OFFSET 18
-#define NVM_CFG1_GLOB_MANUF4_VER_MASK 0x3F000000
-#define NVM_CFG1_GLOB_MANUF4_VER_OFFSET 24
-
- u32 manufacture_time; /* 0x70 */
-#define NVM_CFG1_GLOB_MANUF0_TIME_MASK 0x0000003F
-#define NVM_CFG1_GLOB_MANUF0_TIME_OFFSET 0
-#define NVM_CFG1_GLOB_MANUF1_TIME_MASK 0x00000FC0
-#define NVM_CFG1_GLOB_MANUF1_TIME_OFFSET 6
-#define NVM_CFG1_GLOB_MANUF2_TIME_MASK 0x0003F000
-#define NVM_CFG1_GLOB_MANUF2_TIME_OFFSET 12
-
- u32 led_global_settings; /* 0x74 */
-#define NVM_CFG1_GLOB_LED_SWAP_0_MASK 0x0000000F
-#define NVM_CFG1_GLOB_LED_SWAP_0_OFFSET 0
-#define NVM_CFG1_GLOB_LED_SWAP_1_MASK 0x000000F0
-#define NVM_CFG1_GLOB_LED_SWAP_1_OFFSET 4
-#define NVM_CFG1_GLOB_LED_SWAP_2_MASK 0x00000F00
-#define NVM_CFG1_GLOB_LED_SWAP_2_OFFSET 8
-#define NVM_CFG1_GLOB_LED_SWAP_3_MASK 0x0000F000
-#define NVM_CFG1_GLOB_LED_SWAP_3_OFFSET 12
-
- u32 generic_cont1; /* 0x78 */
-#define NVM_CFG1_GLOB_AVS_DAC_CODE_MASK 0x000003FF
-#define NVM_CFG1_GLOB_AVS_DAC_CODE_OFFSET 0
-
- u32 mbi_version; /* 0x7C */
-#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000FF
-#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0
-#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8
-#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00FF0000
-#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16
-
- u32 mbi_date; /* 0x80 */
-
- u32 misc_sig; /* 0x84 */
-
- /* Define the GPIO mapping to switch i2c mux */
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_MASK 0x000000FF
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_OFFSET 0
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_MASK 0x0000FF00
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_OFFSET 8
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__NA 0x0
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO0 0x1
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO1 0x2
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO2 0x3
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO3 0x4
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO4 0x5
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO5 0x6
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO6 0x7
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO7 0x8
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO8 0x9
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO9 0xA
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO10 0xB
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO11 0xC
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO12 0xD
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO13 0xE
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO14 0xF
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO15 0x10
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO16 0x11
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO17 0x12
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO18 0x13
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO19 0x14
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO20 0x15
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO21 0x16
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO22 0x17
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO23 0x18
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO24 0x19
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO25 0x1A
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO26 0x1B
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO27 0x1C
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO28 0x1D
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO29 0x1E
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO30 0x1F
-#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO31 0x20
- u32 device_capabilities; /* 0x88 */
-#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1
- u32 power_dissipated; /* 0x8C */
- u32 power_consumed; /* 0x90 */
- u32 efi_version; /* 0x94 */
- u32 reserved[42]; /* 0x98 */
+ u32 generic_cont0;
+#define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000FF0
+#define NVM_CFG1_GLOB_MF_MODE_OFFSET 4
+#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0
+#define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1
+#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2
+#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3
+#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4
+#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5
+#define NVM_CFG1_GLOB_MF_MODE_BD 0x6
+#define NVM_CFG1_GLOB_MF_MODE_UFP 0x7
+ u32 engineering_change[3];
+ u32 manufacturing_id;
+ u32 serial_number[4];
+ u32 pcie_cfg;
+ u32 mgmt_traffic;
+ u32 core_cfg;
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000FF
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G 0x0
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G 0x1
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G 0x2
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F 0x3
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E 0x4
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G 0x5
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G 0xB
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G 0xC
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G 0xD
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G 0xE
+ u32 e_lane_cfg1;
+ u32 e_lane_cfg2;
+ u32 f_lane_cfg1;
+ u32 f_lane_cfg2;
+ u32 mps10_preemphasis;
+ u32 mps10_driver_current;
+ u32 mps25_preemphasis;
+ u32 mps25_driver_current;
+ u32 pci_id;
+ u32 pci_subsys_id;
+ u32 bar;
+ u32 mps10_txfir_main;
+ u32 mps10_txfir_post;
+ u32 mps25_txfir_main;
+ u32 mps25_txfir_post;
+ u32 manufacture_ver;
+ u32 manufacture_time;
+ u32 led_global_settings;
+ u32 generic_cont1;
+ u32 mbi_version;
+ u32 mbi_date;
+ u32 misc_sig;
+ u32 device_capabilities;
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI 0x4
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE 0x8
+ u32 power_dissipated;
+ u32 power_consumed;
+ u32 efi_version;
+ u32 multi_network_modes_capability;
+ u32 reserved[41];
};
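
All NVM_CFG1_GLOB_* fields follow the same MASK/OFFSET naming convention,
so extraction can be token-pasted; the macro below is an illustrative
sketch, not part of this patch:

	#define NVM_GLOB_FIELD(reg, field)				\
		(((reg) & NVM_CFG1_GLOB_##field##_MASK) >>		\
		 NVM_CFG1_GLOB_##field##_OFFSET)

	/* e.g. u32 mf_mode = NVM_GLOB_FIELD(glob->generic_cont0, MF_MODE); */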
struct nvm_cfg1_path {
- u32 reserved[30]; /* 0x0 */
+ u32 reserved[30];
};
struct nvm_cfg1_port {
- u32 reserved__m_relocated_to_option_123; /* 0x0 */
- u32 reserved__m_relocated_to_option_124; /* 0x4 */
- u32 generic_cont0; /* 0x8 */
-#define NVM_CFG1_PORT_LED_MODE_MASK 0x000000FF
-#define NVM_CFG1_PORT_LED_MODE_OFFSET 0
-#define NVM_CFG1_PORT_LED_MODE_MAC1 0x0
-#define NVM_CFG1_PORT_LED_MODE_PHY1 0x1
-#define NVM_CFG1_PORT_LED_MODE_PHY2 0x2
-#define NVM_CFG1_PORT_LED_MODE_PHY3 0x3
-#define NVM_CFG1_PORT_LED_MODE_MAC2 0x4
-#define NVM_CFG1_PORT_LED_MODE_PHY4 0x5
-#define NVM_CFG1_PORT_LED_MODE_PHY5 0x6
-#define NVM_CFG1_PORT_LED_MODE_PHY6 0x7
-#define NVM_CFG1_PORT_LED_MODE_MAC3 0x8
-#define NVM_CFG1_PORT_LED_MODE_PHY7 0x9
-#define NVM_CFG1_PORT_LED_MODE_PHY8 0xA
-#define NVM_CFG1_PORT_LED_MODE_PHY9 0xB
-#define NVM_CFG1_PORT_LED_MODE_MAC4 0xC
-#define NVM_CFG1_PORT_LED_MODE_PHY10 0xD
-#define NVM_CFG1_PORT_LED_MODE_PHY11 0xE
-#define NVM_CFG1_PORT_LED_MODE_PHY12 0xF
-#define NVM_CFG1_PORT_ROCE_PRIORITY_MASK 0x0000FF00
-#define NVM_CFG1_PORT_ROCE_PRIORITY_OFFSET 8
-#define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000F0000
-#define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16
-#define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0
-#define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1
-#define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2
-#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3
-#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00F00000
-#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20
-#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1
- u32 pcie_cfg; /* 0xC */
-#define NVM_CFG1_PORT_RESERVED15_MASK 0x00000007
-#define NVM_CFG1_PORT_RESERVED15_OFFSET 0
-
- u32 features; /* 0x10 */
-#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_MASK 0x00000001
-#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_OFFSET 0
-#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_DISABLED 0x0
-#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_ENABLED 0x1
-#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_MASK 0x00000002
-#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_OFFSET 1
-#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_DISABLED 0x0
-#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_ENABLED 0x1
-
- u32 speed_cap_mask; /* 0x14 */
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000FFFF
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G 0x40
-#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_MASK 0xFFFF0000
-#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_OFFSET 16
-#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_1G 0x1
-#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_10G 0x2
-#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_25G 0x8
-#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_40G 0x10
-#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_50G 0x20
-#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_100G 0x40
-
- u32 link_settings; /* 0x18 */
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000F
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_100G 0x7
-#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070
-#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4
-#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1
-#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2
-#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4
-#define NVM_CFG1_PORT_MFW_LINK_SPEED_MASK 0x00000780
-#define NVM_CFG1_PORT_MFW_LINK_SPEED_OFFSET 7
-#define NVM_CFG1_PORT_MFW_LINK_SPEED_AUTONEG 0x0
-#define NVM_CFG1_PORT_MFW_LINK_SPEED_1G 0x1
-#define NVM_CFG1_PORT_MFW_LINK_SPEED_10G 0x2
-#define NVM_CFG1_PORT_MFW_LINK_SPEED_25G 0x4
-#define NVM_CFG1_PORT_MFW_LINK_SPEED_40G 0x5
-#define NVM_CFG1_PORT_MFW_LINK_SPEED_50G 0x6
-#define NVM_CFG1_PORT_MFW_LINK_SPEED_100G 0x7
-#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_MASK 0x00003800
-#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_OFFSET 11
-#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_AUTONEG 0x1
-#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_RX 0x2
-#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_TX 0x4
-#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_MASK 0x00004000
-#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_OFFSET 14
-#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_DISABLED 0x0
-#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_ENABLED 0x1
-
- u32 phy_cfg; /* 0x1C */
-#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_MASK 0x0000FFFF
-#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_OFFSET 0
-#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_HIGIG 0x1
-#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_SCRAMBLER 0x2
-#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_FIBER 0x4
-#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_CL72_AN 0x8
-#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_FEC_AN 0x10
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_MASK 0x00FF0000
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_OFFSET 16
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_BYPASS 0x0
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR 0x2
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR2 0x3
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR4 0x4
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XFI 0x8
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SFI 0x9
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_1000X 0xB
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SGMII 0xC
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLAUI 0x11
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLPPI 0x12
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CAUI 0x21
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CPPI 0x22
-#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_25GAUI 0x31
-#define NVM_CFG1_PORT_AN_MODE_MASK 0xFF000000
-#define NVM_CFG1_PORT_AN_MODE_OFFSET 24
-#define NVM_CFG1_PORT_AN_MODE_NONE 0x0
-#define NVM_CFG1_PORT_AN_MODE_CL73 0x1
-#define NVM_CFG1_PORT_AN_MODE_CL37 0x2
-#define NVM_CFG1_PORT_AN_MODE_CL73_BAM 0x3
-#define NVM_CFG1_PORT_AN_MODE_CL37_BAM 0x4
-#define NVM_CFG1_PORT_AN_MODE_HPAM 0x5
-#define NVM_CFG1_PORT_AN_MODE_SGMII 0x6
-
- u32 mgmt_traffic; /* 0x20 */
-#define NVM_CFG1_PORT_RESERVED61_MASK 0x0000000F
-#define NVM_CFG1_PORT_RESERVED61_OFFSET 0
-
- u32 ext_phy; /* 0x24 */
-#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_MASK 0x000000FF
-#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_OFFSET 0
-#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_NONE 0x0
-#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_BCM84844 0x1
-#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_MASK 0x0000FF00
-#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_OFFSET 8
-
- u32 mba_cfg1; /* 0x28 */
-#define NVM_CFG1_PORT_PREBOOT_OPROM_MASK 0x00000001
-#define NVM_CFG1_PORT_PREBOOT_OPROM_OFFSET 0
-#define NVM_CFG1_PORT_PREBOOT_OPROM_DISABLED 0x0
-#define NVM_CFG1_PORT_PREBOOT_OPROM_ENABLED 0x1
-#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_MASK 0x00000006
-#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_OFFSET 1
-#define NVM_CFG1_PORT_MBA_DELAY_TIME_MASK 0x00000078
-#define NVM_CFG1_PORT_MBA_DELAY_TIME_OFFSET 3
-#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_MASK 0x00000080
-#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_OFFSET 7
-#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_S 0x0
-#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_B 0x1
-#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_MASK 0x00000100
-#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_OFFSET 8
-#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_DISABLED 0x0
-#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_ENABLED 0x1
-#define NVM_CFG1_PORT_RESERVED5_MASK 0x0001FE00
-#define NVM_CFG1_PORT_RESERVED5_OFFSET 9
-#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_MASK 0x001E0000
-#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_OFFSET 17
-#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_AUTONEG 0x0
-#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_1G 0x1
-#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_10G 0x2
-#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_25G 0x4
-#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_40G 0x5
-#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_50G 0x6
-#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_100G 0x7
-#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_SMARTLINQ 0x8
-#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_MASK 0x00E00000
-#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_OFFSET 21
-
- u32 mba_cfg2; /* 0x2C */
-#define NVM_CFG1_PORT_RESERVED65_MASK 0x0000FFFF
-#define NVM_CFG1_PORT_RESERVED65_OFFSET 0
-#define NVM_CFG1_PORT_RESERVED66_MASK 0x00010000
-#define NVM_CFG1_PORT_RESERVED66_OFFSET 16
-
- u32 vf_cfg; /* 0x30 */
-#define NVM_CFG1_PORT_RESERVED8_MASK 0x0000FFFF
-#define NVM_CFG1_PORT_RESERVED8_OFFSET 0
-#define NVM_CFG1_PORT_RESERVED6_MASK 0x000F0000
-#define NVM_CFG1_PORT_RESERVED6_OFFSET 16
-
- struct nvm_cfg_mac_address lldp_mac_address; /* 0x34 */
-
- u32 led_port_settings; /* 0x3C */
-#define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_MASK 0x000000FF
-#define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_OFFSET 0
-#define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_MASK 0x0000FF00
-#define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_OFFSET 8
-#define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_MASK 0x00FF0000
-#define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_OFFSET 16
-#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_1G 0x1
-#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_10G 0x2
-#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_25G 0x8
-#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_40G 0x10
-#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_50G 0x20
-#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_100G 0x40
-
- u32 transceiver_00; /* 0x40 */
-
-	/* GPIO mapping of the transceiver module-absent signal */
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_MASK 0x000000FF
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_OFFSET 0
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_NA 0x0
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO0 0x1
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO1 0x2
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO2 0x3
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO3 0x4
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO4 0x5
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO5 0x6
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO6 0x7
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO7 0x8
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO8 0x9
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO9 0xA
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO10 0xB
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO11 0xC
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO12 0xD
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO13 0xE
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO14 0xF
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO15 0x10
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO16 0x11
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO17 0x12
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO18 0x13
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO19 0x14
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO20 0x15
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO21 0x16
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO22 0x17
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO23 0x18
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO24 0x19
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO25 0x1A
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO26 0x1B
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO27 0x1C
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO28 0x1D
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO29 0x1E
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO30 0x1F
-#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO31 0x20
- /* Define the GPIO mux settings to switch i2c mux to this port */
-#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_MASK 0x00000F00
-#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_OFFSET 8
-#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_MASK 0x0000F000
-#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_OFFSET 12
-
- u32 reserved[133]; /* 0x44 */
+ u32 reserved__m_relocated_to_option_123;
+ u32 reserved__m_relocated_to_option_124;
+ u32 generic_cont0;
+#define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000F0000
+#define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16
+#define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0
+#define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1
+#define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2
+#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00F00000
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_FCOE 0x2
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ISCSI 0x4
+ u32 pcie_cfg;
+ u32 features;
+ u32 speed_cap_mask;
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000FFFF
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G 0x40
+ u32 link_settings;
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000F
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G 0x7
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_SMARTLINQ 0x8
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4
+ u32 phy_cfg;
+ u32 mgmt_traffic;
+ u32 ext_phy;
+ u32 mba_cfg1;
+ u32 mba_cfg2;
+ u32 vf_cfg;
+ struct nvm_cfg_mac_address lldp_mac_address;
+ u32 led_port_settings;
+ u32 transceiver_00;
+ u32 device_ids;
+ u32 board_cfg;
+ u32 mnm_10g_cap;
+ u32 mnm_10g_ctrl;
+ u32 mnm_10g_misc;
+ u32 mnm_25g_cap;
+ u32 mnm_25g_ctrl;
+ u32 mnm_25g_misc;
+ u32 mnm_40g_cap;
+ u32 mnm_40g_ctrl;
+ u32 mnm_40g_misc;
+ u32 mnm_50g_cap;
+ u32 mnm_50g_ctrl;
+ u32 mnm_50g_misc;
+ u32 mnm_100g_cap;
+ u32 mnm_100g_ctrl;
+ u32 mnm_100g_misc;
+ u32 reserved[116];
};
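
Decoding the default link speed from link_settings, as a hedged example
mapping the DRV_LINK_SPEED values above to Mb/s (autoneg and SMARTLINQ
deliberately fall through to 0 here):

	static u32 nvm_default_speed_mbps(u32 link_settings)
	{
		switch ((link_settings & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
			NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
		case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:		return 1000;
		case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:		return 10000;
		case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:		return 25000;
		case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:		return 40000;
		case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:		return 50000;
		case NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G:	return 100000;
		default:	return 0;
		}
	}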
struct nvm_cfg1_func {
- struct nvm_cfg_mac_address mac_address; /* 0x0 */
-
- u32 rsrv1; /* 0x8 */
-#define NVM_CFG1_FUNC_RESERVED1_MASK 0x0000FFFF
-#define NVM_CFG1_FUNC_RESERVED1_OFFSET 0
-#define NVM_CFG1_FUNC_RESERVED2_MASK 0xFFFF0000
-#define NVM_CFG1_FUNC_RESERVED2_OFFSET 16
-
- u32 rsrv2; /* 0xC */
-#define NVM_CFG1_FUNC_RESERVED3_MASK 0x0000FFFF
-#define NVM_CFG1_FUNC_RESERVED3_OFFSET 0
-#define NVM_CFG1_FUNC_RESERVED4_MASK 0xFFFF0000
-#define NVM_CFG1_FUNC_RESERVED4_OFFSET 16
-
- u32 device_id; /* 0x10 */
-#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK 0x0000FFFF
-#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET 0
-#define NVM_CFG1_FUNC_RESERVED77_MASK 0xFFFF0000
-#define NVM_CFG1_FUNC_RESERVED77_OFFSET 16
-
- u32 cmn_cfg; /* 0x14 */
-#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_MASK 0x00000007
-#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_OFFSET 0
-#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_PXE 0x0
-#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_ISCSI_BOOT 0x3
-#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_FCOE_BOOT 0x4
-#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_NONE 0x7
-#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_MASK 0x0007FFF8
-#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_OFFSET 3
-#define NVM_CFG1_FUNC_PERSONALITY_MASK 0x00780000
-#define NVM_CFG1_FUNC_PERSONALITY_OFFSET 19
-#define NVM_CFG1_FUNC_PERSONALITY_ETHERNET 0x0
-#define NVM_CFG1_FUNC_PERSONALITY_ISCSI 0x1
-#define NVM_CFG1_FUNC_PERSONALITY_FCOE 0x2
-#define NVM_CFG1_FUNC_PERSONALITY_ROCE 0x3
-#define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_MASK 0x7F800000
-#define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_OFFSET 23
-#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_MASK 0x80000000
-#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_OFFSET 31
-#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_DISABLED 0x0
-#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_ENABLED 0x1
-
- u32 pci_cfg; /* 0x18 */
-#define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_MASK 0x0000007F
-#define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_OFFSET 0
-#define NVM_CFG1_FUNC_RESERVESD12_MASK 0x00003F80
-#define NVM_CFG1_FUNC_RESERVESD12_OFFSET 7
-#define NVM_CFG1_FUNC_BAR1_SIZE_MASK 0x0003C000
-#define NVM_CFG1_FUNC_BAR1_SIZE_OFFSET 14
-#define NVM_CFG1_FUNC_BAR1_SIZE_DISABLED 0x0
-#define NVM_CFG1_FUNC_BAR1_SIZE_64K 0x1
-#define NVM_CFG1_FUNC_BAR1_SIZE_128K 0x2
-#define NVM_CFG1_FUNC_BAR1_SIZE_256K 0x3
-#define NVM_CFG1_FUNC_BAR1_SIZE_512K 0x4
-#define NVM_CFG1_FUNC_BAR1_SIZE_1M 0x5
-#define NVM_CFG1_FUNC_BAR1_SIZE_2M 0x6
-#define NVM_CFG1_FUNC_BAR1_SIZE_4M 0x7
-#define NVM_CFG1_FUNC_BAR1_SIZE_8M 0x8
-#define NVM_CFG1_FUNC_BAR1_SIZE_16M 0x9
-#define NVM_CFG1_FUNC_BAR1_SIZE_32M 0xA
-#define NVM_CFG1_FUNC_BAR1_SIZE_64M 0xB
-#define NVM_CFG1_FUNC_BAR1_SIZE_128M 0xC
-#define NVM_CFG1_FUNC_BAR1_SIZE_256M 0xD
-#define NVM_CFG1_FUNC_BAR1_SIZE_512M 0xE
-#define NVM_CFG1_FUNC_BAR1_SIZE_1G 0xF
-#define NVM_CFG1_FUNC_MAX_BANDWIDTH_MASK 0x03FC0000
-#define NVM_CFG1_FUNC_MAX_BANDWIDTH_OFFSET 18
-
- struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr; /* 0x1C */
-
- struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr; /* 0x24 */
- u32 preboot_generic_cfg; /* 0x2C */
- u32 reserved[8]; /* 0x30 */
+ struct nvm_cfg_mac_address mac_address;
+ u32 rsrv1;
+ u32 rsrv2;
+ u32 device_id;
+ u32 cmn_cfg;
+ u32 pci_cfg;
+ struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr;
+ struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr;
+ u32 preboot_generic_cfg;
+ u32 reserved[8];
};
struct nvm_cfg1 {
- struct nvm_cfg1_glob glob; /* 0x0 */
-
- struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX]; /* 0x140 */
-
- struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX]; /* 0x230 */
-
- struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX]; /* 0xB90 */
-};
-
-/******************************************
-* nvm_cfg structs
-******************************************/
-
-enum nvm_cfg_sections {
- NVM_CFG_SECTION_NVM_CFG1,
- NVM_CFG_SECTION_MAX
-};
-
-struct nvm_cfg {
- u32 num_sections;
- u32 sections_offset[NVM_CFG_SECTION_MAX];
- struct nvm_cfg1 cfg1;
+ struct nvm_cfg1_glob glob;
+ struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX];
+ struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX];
+ struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX];
};
-
-#define PORT_0 0
-#define PORT_1 1
-#define PORT_2 2
-#define PORT_3 3
-
-extern struct spad_layout g_spad;
-
-#define MCP_SPAD_SIZE 0x00028000 /* 160 KB */
-
-#define SPAD_OFFSET(addr) (((u32)addr - (u32)CPU_SPAD_BASE))
-
-#define TO_OFFSIZE(_offset, _size) \
- (u32)((((u32)(_offset) >> 2) << OFFSIZE_OFFSET_SHIFT) | \
- (((u32)(_size) >> 2) << OFFSIZE_SIZE_SHIFT))
-
-enum spad_sections {
- SPAD_SECTION_TRACE,
- SPAD_SECTION_NVM_CFG,
- SPAD_SECTION_PUBLIC,
- SPAD_SECTION_PRIVATE,
- SPAD_SECTION_MAX
-};
-
-struct spad_layout {
- struct nvm_cfg nvm_cfg;
- struct mcp_public_data public_data;
-};
-
-#define CRC_MAGIC_VALUE 0xDEBB20E3
-#define CRC32_POLYNOMIAL 0xEDB88320
-#define NVM_CRC_SIZE (sizeof(u32))
-
-enum nvm_sw_arbitrator {
- NVM_SW_ARB_HOST,
- NVM_SW_ARB_MCP,
- NVM_SW_ARB_UART,
- NVM_SW_ARB_RESERVED
-};
-
-/****************************************************************************
-* Boot Strap Region *
-****************************************************************************/
-struct legacy_bootstrap_region {
- u32 magic_value;
-#define NVM_MAGIC_VALUE 0x669955aa
- u32 sram_start_addr;
- u32 code_len; /* boot code length (in dwords) */
- u32 code_start_addr;
- u32 crc; /* 32-bit CRC */
-};
-
-/****************************************************************************
-* Directories Region *
-****************************************************************************/
-struct nvm_code_entry {
- u32 image_type; /* Image type */
- u32 nvm_start_addr; /* NVM address of the image */
- u32 len; /* Include CRC */
- u32 sram_start_addr;
- u32 sram_run_addr; /* Relevant in case of MIM only */
-};
-
-enum nvm_image_type {
- NVM_TYPE_TIM1 = 0x01,
- NVM_TYPE_TIM2 = 0x02,
- NVM_TYPE_MIM1 = 0x03,
- NVM_TYPE_MIM2 = 0x04,
- NVM_TYPE_MBA = 0x05,
- NVM_TYPE_MODULES_PN = 0x06,
- NVM_TYPE_VPD = 0x07,
- NVM_TYPE_MFW_TRACE1 = 0x08,
- NVM_TYPE_MFW_TRACE2 = 0x09,
- NVM_TYPE_NVM_CFG1 = 0x0a,
- NVM_TYPE_L2B = 0x0b,
- NVM_TYPE_DIR1 = 0x0c,
- NVM_TYPE_EAGLE_FW1 = 0x0d,
- NVM_TYPE_FALCON_FW1 = 0x0e,
- NVM_TYPE_PCIE_FW1 = 0x0f,
- NVM_TYPE_HW_SET = 0x10,
- NVM_TYPE_LIM = 0x11,
- NVM_TYPE_AVS_FW1 = 0x12,
- NVM_TYPE_DIR2 = 0x13,
- NVM_TYPE_CCM = 0x14,
- NVM_TYPE_EAGLE_FW2 = 0x15,
- NVM_TYPE_FALCON_FW2 = 0x16,
- NVM_TYPE_PCIE_FW2 = 0x17,
- NVM_TYPE_AVS_FW2 = 0x18,
-
- NVM_TYPE_MAX,
-};
-
-#define MAX_NVM_DIR_ENTRIES 200
-
-struct nvm_dir {
- s32 seq;
-#define NVM_DIR_NEXT_MFW_MASK 0x00000001
-#define NVM_DIR_SEQ_MASK 0xfffffffe
-#define NVM_DIR_NEXT_MFW(seq) ((seq) & NVM_DIR_NEXT_MFW_MASK)
-
-#define IS_DIR_SEQ_VALID(seq) ((seq & NVM_DIR_SEQ_MASK) != NVM_DIR_SEQ_MASK)
-
- u32 num_images;
- u32 rsrv;
- struct nvm_code_entry code[1]; /* Up to MAX_NVM_DIR_ENTRIES */
-};
-
-#define NVM_DIR_SIZE(_num_images) (sizeof(struct nvm_dir) + \
- (_num_images - \
- 1) * sizeof(struct nvm_code_entry) + \
- NVM_CRC_SIZE)
-
-struct nvm_vpd_image {
- u32 format_revision;
-#define VPD_IMAGE_VERSION 1
-
- /* This array length depends on the number of VPD fields */
- u8 vpd_data[1];
-};
-
-/****************************************************************************
-* NVRAM FULL MAP *
-****************************************************************************/
-#define DIR_ID_1 (0)
-#define DIR_ID_2 (1)
-#define MAX_DIR_IDS (2)
-
-#define MFW_BUNDLE_1 (0)
-#define MFW_BUNDLE_2 (1)
-#define MAX_MFW_BUNDLES (2)
-
-#define FLASH_PAGE_SIZE 0x1000
-#define NVM_DIR_MAX_SIZE (FLASH_PAGE_SIZE) /* 4Kb */
-#define ASIC_MIM_MAX_SIZE (300 * FLASH_PAGE_SIZE) /* 1.2Mb */
-#define FPGA_MIM_MAX_SIZE (25 * FLASH_PAGE_SIZE) /* 60Kb */
-
-#define LIM_MAX_SIZE ((2 * \
- FLASH_PAGE_SIZE) - \
- sizeof(struct legacy_bootstrap_region) - \
- NVM_RSV_SIZE)
-#define LIM_OFFSET (NVM_OFFSET(lim_image))
-#define NVM_RSV_SIZE (44)
-#define MIM_MAX_SIZE(is_asic) ((is_asic) ? ASIC_MIM_MAX_SIZE : \
- FPGA_MIM_MAX_SIZE)
-#define MIM_OFFSET(idx, is_asic) (NVM_OFFSET(dir[MAX_MFW_BUNDLES]) + \
- ((idx == \
- NVM_TYPE_MIM2) ? MIM_MAX_SIZE(is_asic) : 0))
-#define NVM_FIXED_AREA_SIZE(is_asic) (sizeof(struct nvm_image) + \
- MIM_MAX_SIZE(is_asic) * 2)
-
-union nvm_dir_union {
- struct nvm_dir dir;
- u8 page[FLASH_PAGE_SIZE];
-};
-
-/* Address
- * +-------------------+ 0x000000
- * | Bootstrap: |
- * | magic_number |
- * | sram_start_addr |
- * | code_len |
- * | code_start_addr |
- * | crc |
- * +-------------------+ 0x000014
- * | rsrv |
- * +-------------------+ 0x000040
- * | LIM |
- * +-------------------+ 0x002000
- * | Dir1 |
- * +-------------------+ 0x003000
- * | Dir2 |
- * +-------------------+ 0x004000
- * | MIM1 |
- * +-------------------+ 0x130000
- * | MIM2 |
- * +-------------------+ 0x25C000
- * | Rest Images: |
- * | TIM1/2 |
- * | MFW_TRACE1/2 |
- * | Eagle/Falcon FW |
- * | PCIE/AVS FW |
- * | MBA/CCM/L2B |
- * | VPD |
- * | optic_modules |
- * | ... |
- * +-------------------+ 0x400000
- */
-struct nvm_image {
-/*********** !!! FIXED SECTIONS !!! DO NOT MODIFY !!! **********************/
- /* NVM Offset (size) */
- struct legacy_bootstrap_region bootstrap;
- u8 rsrv[NVM_RSV_SIZE];
- u8 lim_image[LIM_MAX_SIZE];
- union nvm_dir_union dir[MAX_MFW_BUNDLES];
-
- /* MIM1_IMAGE 0x004000 (0x12c000) */
- /* MIM2_IMAGE 0x130000 (0x12c000) */
-/*********** !!! FIXED SECTIONS !!! DO NOT MODIFY !!! **********************/
-}; /* 0x134 */
-
-#define NVM_OFFSET(f) ((u32_t)((int_ptr_t)(&(((struct nvm_image *)0)->f))))
-
-struct hw_set_info {
- u32 reg_type;
-#define GRC_REG_TYPE 1
-#define PHY_REG_TYPE 2
-#define PCI_REG_TYPE 4
-
- u32 bank_num;
- u32 pf_num;
- u32 operation;
-#define READ_OP 1
-#define WRITE_OP 2
-#define RMW_SET_OP 3
-#define RMW_CLR_OP 4
-
- u32 reg_addr;
- u32 reg_data;
-
- u32 reset_type;
-#define POR_RESET_TYPE BIT(0)
-#define HARD_RESET_TYPE BIT(1)
-#define CORE_RESET_TYPE BIT(2)
-#define MCP_RESET_TYPE BIT(3)
-#define PERSET_ASSERT BIT(4)
-#define PERSET_DEASSERT BIT(5)
-};
-
-struct hw_set_image {
- u32 format_version;
-#define HW_SET_IMAGE_VERSION 1
- u32 no_hw_sets;
-
- /* This array length depends on the no_hw_sets */
- struct hw_set_info hw_sets[1];
-};
-
#endif
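
The directory layout removed above is variable-length: struct nvm_dir ends in a one-entry code[] array, and NVM_DIR_SIZE() adds only (num_images - 1) further entries, since sizeof() already counts the first, plus a trailing CRC dword. A minimal userspace sketch of that sizing arithmetic; struct and macro names below are local stand-ins, not the driver's:

#include <stdio.h>
#include <stdint.h>

struct code_entry {
    uint32_t image_type;
    uint32_t nvm_start_addr;
    uint32_t len;              /* includes CRC */
    uint32_t sram_start_addr;
    uint32_t sram_run_addr;
};

struct dir {
    int32_t seq;
    uint32_t num_images;
    uint32_t rsrv;
    struct code_entry code[1]; /* grows up to MAX_NVM_DIR_ENTRIES */
};

#define CRC_SIZE sizeof(uint32_t)

/* sizeof(struct dir) already counts one entry, so only num_images - 1
 * further entries are added, then the trailing CRC dword. */
static size_t dir_size(uint32_t num_images)
{
    return sizeof(struct dir) +
           (num_images - 1) * sizeof(struct code_entry) + CRC_SIZE;
}

int main(void)
{
    printf("directory with 4 images: %zu bytes\n", dir_size(4));
    return 0;
}
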
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c
index a95a3e4b3101..e17885321faf 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c
@@ -23,6 +23,7 @@
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_reg_addr.h"
+#include "qed_sriov.h"
#define QED_BAR_ACQUIRE_TIMEOUT 1000
@@ -236,8 +237,12 @@ static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
quota = min_t(size_t, n - done,
PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);
- qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
- hw_offset = qed_ptt_get_bar_addr(p_ptt);
+ if (IS_PF(p_hwfn->cdev)) {
+ qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
+ hw_offset = qed_ptt_get_bar_addr(p_ptt);
+ } else {
+ hw_offset = hw_addr + done;
+ }
dw_count = quota / 4;
host_addr = (u32 *)((u8 *)addr + done);
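
Worth noting in the hunk above: qed_memcpy_hw() already chunks the copy to PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE, so the PF/VF split only changes how each chunk's target is addressed; a PF re-aims its PTT window at hw_addr + done, while a VF's BAR maps the range directly. A standalone sketch of the pattern, with the window size and the set-window step as stand-ins:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define WINDOW_SIZE 16   /* stand-in for the PF BAR window size */

static void set_window(uint32_t hw_addr)
{
    (void)hw_addr;       /* a PF would program its PTT window here */
}

static void memcpy_hw(uint8_t *bar, const uint8_t *host, uint32_t hw_addr,
                      size_t n, int is_pf)
{
    size_t done = 0;

    while (done < n) {
        size_t quota = n - done < WINDOW_SIZE ? n - done : WINDOW_SIZE;
        uint32_t hw_offset;

        if (is_pf) {
            set_window(hw_addr + done);  /* re-aim window per chunk */
            hw_offset = 0;               /* chunk now sits at window base */
        } else {
            hw_offset = hw_addr + done;  /* VF BAR maps the range directly */
        }

        memcpy(bar + hw_offset, host + done, quota);
        done += quota;
    }
}

int main(void)
{
    uint8_t bar[64] = { 0 }, host[40];

    memset(host, 0xab, sizeof(host));
    memcpy_hw(bar, host, 8, sizeof(host), 0);   /* VF-style direct copy */
    printf("last copied byte: 0x%02x\n", bar[8 + sizeof(host) - 1]);
    return 0;
}
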
@@ -338,14 +343,25 @@ void qed_port_unpretend(struct qed_hwfn *p_hwfn,
*(u32 *)&p_ptt->pxp.pretend);
}
+u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid)
+{
+ u32 concrete_fid = 0;
+
+ SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id);
+ SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid);
+ SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1);
+
+ return concrete_fid;
+}
+
/* DMAE */
static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
const u8 is_src_type_grc,
const u8 is_dst_type_grc,
struct qed_dmae_params *p_params)
{
+ u16 opcode_b = 0;
u32 opcode = 0;
- u16 opcodeB = 0;
/* Whether the source is the PCIe or the GRC.
* 0- The source is the PCIe
@@ -387,14 +403,24 @@ static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
opcode |= (DMAE_CMD_DST_ADDR_RESET_MASK <<
DMAE_CMD_DST_ADDR_RESET_SHIFT);
- opcodeB |= (DMAE_CMD_SRC_VF_ID_MASK <<
- DMAE_CMD_SRC_VF_ID_SHIFT);
+ /* SRC/DST VFID: all 1's - PF, otherwise VF id */
+ if (p_params->flags & QED_DMAE_FLAG_VF_SRC) {
+ opcode |= 1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT;
+ opcode_b |= p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT;
+ } else {
+ opcode_b |= DMAE_CMD_SRC_VF_ID_MASK <<
+ DMAE_CMD_SRC_VF_ID_SHIFT;
+ }
- opcodeB |= (DMAE_CMD_DST_VF_ID_MASK <<
- DMAE_CMD_DST_VF_ID_SHIFT);
+ if (p_params->flags & QED_DMAE_FLAG_VF_DST) {
+ opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
+ opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
+ } else {
+ opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT;
+ }
p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode);
- p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcodeB);
+ p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcode_b);
}
u32 qed_dmae_idx_to_go_cmd(u8 idx)
@@ -420,7 +446,7 @@ qed_dmae_post_command(struct qed_hwfn *p_hwfn,
idx_cmd,
le32_to_cpu(command->opcode),
le16_to_cpu(command->opcode_b),
- le16_to_cpu(command->length),
+ le16_to_cpu(command->length_dw),
le32_to_cpu(command->src_addr_hi),
le32_to_cpu(command->src_addr_lo),
le32_to_cpu(command->dst_addr_hi),
@@ -435,7 +461,7 @@ qed_dmae_post_command(struct qed_hwfn *p_hwfn,
idx_cmd,
le32_to_cpu(command->opcode),
le16_to_cpu(command->opcode_b),
- le16_to_cpu(command->length),
+ le16_to_cpu(command->length_dw),
le32_to_cpu(command->src_addr_hi),
le32_to_cpu(command->src_addr_lo),
le32_to_cpu(command->dst_addr_hi),
@@ -619,7 +645,7 @@ static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
- cmd->length = cpu_to_le16((u16)length);
+ cmd->length_dw = cpu_to_le16((u16)length);
qed_dmae_post_command(p_hwfn, p_ptt);
@@ -742,17 +768,62 @@ int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
return rc;
}
+int
+qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 grc_addr,
+ dma_addr_t dest_addr, u32 size_in_dwords, u32 flags)
+{
+ u32 grc_addr_in_dw = grc_addr / sizeof(u32);
+ struct qed_dmae_params params;
+ int rc;
+
+ memset(&params, 0, sizeof(struct qed_dmae_params));
+ params.flags = flags;
+
+ mutex_lock(&p_hwfn->dmae_info.mutex);
+
+ rc = qed_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw,
+ dest_addr, QED_DMAE_ADDRESS_GRC,
+ QED_DMAE_ADDRESS_HOST_VIRT,
+ size_in_dwords, &params);
+
+ mutex_unlock(&p_hwfn->dmae_info.mutex);
+
+ return rc;
+}
+
+int
+qed_dmae_host2host(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ dma_addr_t source_addr,
+ dma_addr_t dest_addr,
+ u32 size_in_dwords, struct qed_dmae_params *p_params)
+{
+ int rc;
+
+ mutex_lock(&(p_hwfn->dmae_info.mutex));
+
+ rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
+ dest_addr,
+ QED_DMAE_ADDRESS_HOST_PHYS,
+ QED_DMAE_ADDRESS_HOST_PHYS,
+ size_in_dwords, p_params);
+
+ mutex_unlock(&(p_hwfn->dmae_info.mutex));
+
+ return rc;
+}
+
u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
- enum protocol_type proto,
- union qed_qm_pq_params *p_params)
+ enum protocol_type proto, union qed_qm_pq_params *p_params)
{
u16 pq_id = 0;
- if ((proto == PROTOCOLID_CORE || proto == PROTOCOLID_ETH) &&
- !p_params) {
+ if ((proto == PROTOCOLID_CORE ||
+ proto == PROTOCOLID_ETH ||
+ proto == PROTOCOLID_ISCSI ||
+ proto == PROTOCOLID_ROCE) && !p_params) {
DP_NOTICE(p_hwfn,
- "Protocol %d received NULL PQ params\n",
- proto);
+ "Protocol %d received NULL PQ params\n", proto);
return 0;
}
@@ -760,11 +831,28 @@ u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
case PROTOCOLID_CORE:
if (p_params->core.tc == LB_TC)
pq_id = p_hwfn->qm_info.pure_lb_pq;
+ else if (p_params->core.tc == OOO_LB_TC)
+ pq_id = p_hwfn->qm_info.ooo_pq;
else
pq_id = p_hwfn->qm_info.offload_pq;
break;
case PROTOCOLID_ETH:
pq_id = p_params->eth.tc;
+ if (p_params->eth.is_vf)
+ pq_id += p_hwfn->qm_info.vf_queues_offset +
+ p_params->eth.vf_id;
+ break;
+ case PROTOCOLID_ISCSI:
+ if (p_params->iscsi.q_idx == 1)
+ pq_id = p_hwfn->qm_info.pure_ack_pq;
+ break;
+ case PROTOCOLID_ROCE:
+ if (p_params->roce.dcqcn)
+ pq_id = p_params->roce.qpid;
+ else
+ pq_id = p_hwfn->qm_info.offload_pq;
+ if (pq_id > p_hwfn->qm_info.num_pf_rls)
+ pq_id = p_hwfn->qm_info.offload_pq;
break;
default:
pq_id = 0;
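
qed_vfid_to_concrete() packs the relative PF id, the VF id, and a VF-valid flag into one concrete FID with SET_FIELD(). A userspace sketch of that packing; the field masks/shifts and the SET_FIELD() expansion below are assumptions for illustration, the real values live in the qed HSI headers:

#include <stdio.h>
#include <stdint.h>

/* Assumed field layout, for illustration only */
#define FID_PFID_MASK      0xf
#define FID_PFID_SHIFT     0
#define FID_VFID_MASK      0xff
#define FID_VFID_SHIFT     4
#define FID_VFVALID_MASK   0x1
#define FID_VFVALID_SHIFT  12

#define SET_FIELD(value, name, flag)                                 \
    do {                                                             \
        (value) &= ~((uint32_t)name##_MASK << name##_SHIFT);         \
        (value) |= ((uint32_t)(flag) & name##_MASK) << name##_SHIFT; \
    } while (0)

static uint32_t vfid_to_concrete(uint8_t rel_pf_id, uint8_t vfid)
{
    uint32_t concrete_fid = 0;

    SET_FIELD(concrete_fid, FID_PFID, rel_pf_id);
    SET_FIELD(concrete_fid, FID_VFID, vfid);
    SET_FIELD(concrete_fid, FID_VFVALID, 1);
    return concrete_fid;
}

int main(void)
{
    printf("PF 2 / VF 5 -> concrete fid 0x%x\n", vfid_to_concrete(2, 5));
    return 0;
}
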
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h
index e56d433793be..d01557092868 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h
@@ -221,6 +221,16 @@ void qed_port_unpretend(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
+ * @brief qed_vfid_to_concrete - build a concrete FID for a
+ * given VF ID
+ *
+ * @param p_hwfn
+ * @param vfid
+ */
+u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid);
+
+/**
* @brief qed_dmae_idx_to_go_cmd - map the idx to dmae cmd
* this is declared here since other files will require it.
* @param idx
@@ -244,6 +254,10 @@ void qed_dmae_info_free(struct qed_hwfn *p_hwfn);
union qed_qm_pq_params {
struct {
+ u8 q_idx;
+ } iscsi;
+
+ struct {
u8 tc;
} core;
@@ -252,11 +266,15 @@ union qed_qm_pq_params {
u8 vf_id;
u8 tc;
} eth;
+
+ struct {
+ u8 dcqcn;
+ u8 qpid; /* roce relative */
+ } roce;
};
u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
- enum protocol_type proto,
- union qed_qm_pq_params *params);
+ enum protocol_type proto, union qed_qm_pq_params *params);
int qed_init_fw_data(struct qed_dev *cdev,
const u8 *fw_data);
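
The widened union qed_qm_pq_params lets each protocol pass only the selector it needs: a queue index for iSCSI, a TC for core, TC plus VF identity for Ethernet, and DCQCN/QP id for RoCE. A sketch of how a caller fills the union and how the switch in qed_get_qm_pq() resolves it; the PQ numbers and qm_info stand-ins are invented:

#include <stdio.h>
#include <stdint.h>

enum protocol_type {
    PROTOCOLID_CORE, PROTOCOLID_ETH, PROTOCOLID_ISCSI, PROTOCOLID_ROCE
};

union qm_pq_params {
    struct { uint8_t q_idx; } iscsi;
    struct { uint8_t tc; } core;
    struct { uint8_t is_vf; uint8_t vf_id; uint8_t tc; } eth;
    struct { uint8_t dcqcn; uint8_t qpid; } roce;
};

/* Invented stand-ins for the qm_info fields the real code consults */
#define LB_TC          8
#define PURE_LB_PQ     32
#define OFFLOAD_PQ     33
#define PURE_ACK_PQ    34
#define VF_QUEUE_BASE  40

static uint16_t get_qm_pq(enum protocol_type proto, union qm_pq_params *p)
{
    switch (proto) {
    case PROTOCOLID_CORE:
        return p->core.tc == LB_TC ? PURE_LB_PQ : OFFLOAD_PQ;
    case PROTOCOLID_ETH:
        return p->eth.tc +
               (p->eth.is_vf ? VF_QUEUE_BASE + p->eth.vf_id : 0);
    case PROTOCOLID_ISCSI:
        return p->iscsi.q_idx == 1 ? PURE_ACK_PQ : 0;
    case PROTOCOLID_ROCE:
        return p->roce.dcqcn ? p->roce.qpid : OFFLOAD_PQ;
    }
    return 0;
}

int main(void)
{
    union qm_pq_params pq = { .eth = { .is_vf = 1, .vf_id = 3, .tc = 0 } };

    printf("VF 3, TC 0 -> PQ %u\n", get_qm_pq(PROTOCOLID_ETH, &pq));
    return 0;
}
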
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index f55ebdc3c832..23e455f22adc 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -31,7 +31,6 @@ enum cminterface {
};
/* general constants */
-#define QM_PQ_ELEMENT_SIZE 4 /* in bytes */
#define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
QM_PQ_ELEMENT_SIZE, \
0x1000) : 0)
@@ -44,28 +43,28 @@ enum cminterface {
/* other PQ constants */
#define QM_OTHER_PQS_PER_PF 4
/* WFQ constants */
-#define QM_WFQ_UPPER_BOUND 6250000
+#define QM_WFQ_UPPER_BOUND 62500000
#define QM_WFQ_VP_PQ_VOQ_SHIFT 0
#define QM_WFQ_VP_PQ_PF_SHIFT 5
#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
-#define QM_WFQ_MAX_INC_VAL 4375000
-#define QM_WFQ_INIT_CRD(inc_val) (2 * (inc_val))
+#define QM_WFQ_MAX_INC_VAL 43750000
+
/* RL constants */
-#define QM_RL_UPPER_BOUND 6250000
+#define QM_RL_UPPER_BOUND 62500000
#define QM_RL_PERIOD 5 /* in us */
#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
+#define QM_RL_MAX_INC_VAL 43750000
#define QM_RL_INC_VAL(rate) max_t(u32, \
- (((rate ? rate : 1000000) \
- * QM_RL_PERIOD) / 8), 1)
-#define QM_RL_MAX_INC_VAL 4375000
+ (u32)(((rate ? rate : \
+ 1000000) * \
+ QM_RL_PERIOD * \
+ 101) / (8 * 100)), 1)
/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF 1
#define QM_OPPOR_FW_STOP_DEF 0
#define QM_OPPOR_PQ_EMPTY_DEF 1
-#define EAGLE_WORKAROUND_TC 7
/* Command Queue constants */
#define PBF_CMDQ_PURE_LB_LINES 150
-#define PBF_CMDQ_EAGLE_WORKAROUND_LINES 8
#define PBF_CMDQ_LINES_RT_OFFSET(voq) ( \
PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq * \
(PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
@@ -80,7 +79,6 @@ enum cminterface {
/* BTB: blocks constants (block size = 256B) */
#define BTB_JUMBO_PKT_BLOCKS 38
#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
-#define BTB_EAGLE_WORKAROUND_BLOCKS 4
#define BTB_PURE_LB_FACTOR 10
#define BTB_PURE_LB_RATIO 7
/* QM stop command constants */
@@ -107,9 +105,9 @@ enum cminterface {
cmd ## _ ## field, \
value)
/* QM: VOQ macros */
-#define PHYS_VOQ(port, tc, max_phy_tcs_pr_port) ((port) * \
- (max_phy_tcs_pr_port) \
- + (tc))
+#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) ((port) * \
+ (max_phys_tcs_per_port) + \
+ (tc))
#define LB_VOQ(port) ( \
MAX_PHYS_VOQS + (port))
#define VOQ(port, tc, max_phy_tcs_pr_port) \
@@ -120,8 +118,7 @@ enum cminterface {
: LB_VOQ(port))
/******************** INTERNAL IMPLEMENTATION *********************/
/* Prepare PF RL enable/disable runtime init values */
-static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn,
- bool pf_rl_en)
+static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
{
STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
if (pf_rl_en) {
@@ -130,8 +127,7 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn,
(1 << MAX_NUM_VOQS) - 1);
/* write RL period */
STORE_RT_REG(p_hwfn,
- QM_REG_RLPFPERIOD_RT_OFFSET,
- QM_RL_PERIOD_CLK_25M);
+ QM_REG_RLPFPERIOD_RT_OFFSET, QM_RL_PERIOD_CLK_25M);
STORE_RT_REG(p_hwfn,
QM_REG_RLPFPERIODTIMER_RT_OFFSET,
QM_RL_PERIOD_CLK_25M);
@@ -144,8 +140,7 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn,
}
/* Prepare PF WFQ enable/disable runtime init values */
-static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn,
- bool pf_wfq_en)
+static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en)
{
STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
/* set credit threshold for QM bypass flow */
@@ -156,8 +151,7 @@ static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn,
}
/* Prepare VPORT RL enable/disable runtime init values */
-static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn,
- bool vport_rl_en)
+static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn, bool vport_rl_en)
{
STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
vport_rl_en ? 1 : 0);
@@ -178,8 +172,7 @@ static void qed_enable_vport_rl(struct qed_hwfn *p_hwfn,
}
/* Prepare VPORT WFQ enable/disable runtime init values */
-static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn,
- bool vport_wfq_en)
+static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en)
{
STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
vport_wfq_en ? 1 : 0);
@@ -194,8 +187,7 @@ static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn,
* the specified VOQ
*/
static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
- u8 voq,
- u16 cmdq_lines)
+ u8 voq, u16 cmdq_lines)
{
u32 qm_line_crd;
@@ -221,7 +213,7 @@ static void qed_cmdq_lines_rt_init(
u8 max_phys_tcs_per_port,
struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
- u8 tc, voq, port_id;
+ u8 tc, voq, port_id, num_tcs_in_port;
/* clear PBF lines for all VOQs */
for (voq = 0; voq < MAX_NUM_VOQS; voq++)
@@ -229,22 +221,31 @@ static void qed_cmdq_lines_rt_init(
for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
if (port_params[port_id].active) {
u16 phys_lines, phys_lines_per_tc;
- u8 phys_tcs = port_params[port_id].num_active_phys_tcs;
- /* find #lines to divide between the active
- * physical TCs.
- */
+ /* find #lines to divide between active phys TCs */
phys_lines = port_params[port_id].num_pbf_cmd_lines -
PBF_CMDQ_PURE_LB_LINES;
/* find #lines per active physical TC */
- phys_lines_per_tc = phys_lines / phys_tcs;
+ num_tcs_in_port = 0;
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+ if (((port_params[port_id].active_phys_tcs >>
+ tc) & 0x1) == 1)
+ num_tcs_in_port++;
+ }
+
+ phys_lines_per_tc = phys_lines / num_tcs_in_port;
/* init registers per active TC */
- for (tc = 0; tc < phys_tcs; tc++) {
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+ if (((port_params[port_id].active_phys_tcs >>
+ tc) & 0x1) != 1)
+ continue;
+
voq = PHYS_VOQ(port_id, tc,
max_phys_tcs_per_port);
qed_cmdq_lines_voq_rt_init(p_hwfn, voq,
phys_lines_per_tc);
}
+
/* init registers for pure LB TC */
qed_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id),
PBF_CMDQ_PURE_LB_LINES);
@@ -259,34 +260,42 @@ static void qed_btb_blocks_rt_init(
struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
u32 usable_blocks, pure_lb_blocks, phys_blocks;
- u8 tc, voq, port_id;
+ u8 tc, voq, port_id, num_tcs_in_port;
for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
u32 temp;
- u8 phys_tcs;
if (!port_params[port_id].active)
continue;
- phys_tcs = port_params[port_id].num_active_phys_tcs;
-
/* subtract headroom blocks */
usable_blocks = port_params[port_id].num_btb_blocks -
BTB_HEADROOM_BLOCKS;
- /* find blocks per physical TC. use factor to avoid
- * floating arithmethic.
- */
+ /* find blocks per physical TC */
+ num_tcs_in_port = 0;
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+ if (((port_params[port_id].active_phys_tcs >>
+ tc) & 0x1) == 1)
+ num_tcs_in_port++;
+ }
+
pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
- (phys_tcs * BTB_PURE_LB_FACTOR +
+ (num_tcs_in_port * BTB_PURE_LB_FACTOR +
BTB_PURE_LB_RATIO);
pure_lb_blocks = max_t(u32, BTB_JUMBO_PKT_BLOCKS,
pure_lb_blocks / BTB_PURE_LB_FACTOR);
- phys_blocks = (usable_blocks - pure_lb_blocks) / phys_tcs;
+ phys_blocks = (usable_blocks - pure_lb_blocks) /
+ num_tcs_in_port;
/* init physical TCs */
- for (tc = 0; tc < phys_tcs; tc++) {
- voq = PHYS_VOQ(port_id, tc, max_phys_tcs_per_port);
+ for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
+ if (((port_params[port_id].active_phys_tcs >>
+ tc) & 0x1) != 1)
+ continue;
+
+ voq = PHYS_VOQ(port_id, tc,
+ max_phys_tcs_per_port);
STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(voq),
phys_blocks);
}
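
Both hunks above replace the old num_active_phys_tcs count with a walk over the active_phys_tcs bitmap, so command lines and BTB blocks are split only across TCs whose bit is actually set. A standalone sketch of the command-line split; the constants match the patch, the bitmap value is an example:

#include <stdio.h>
#include <stdint.h>

#define NUM_OF_PHYS_TCS        8
#define PBF_CMDQ_PURE_LB_LINES 150

static void split_cmdq_lines(uint16_t num_pbf_cmd_lines,
                             uint8_t active_phys_tcs)
{
    uint16_t phys_lines = num_pbf_cmd_lines - PBF_CMDQ_PURE_LB_LINES;
    uint8_t tc, num_tcs_in_port = 0;

    /* count TCs whose bit is set, as in the patch */
    for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
        if ((active_phys_tcs >> tc) & 0x1)
            num_tcs_in_port++;

    for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
        if (!((active_phys_tcs >> tc) & 0x1))
            continue;
        printf("TC%u gets %u command lines\n",
               tc, phys_lines / num_tcs_in_port);
    }
}

int main(void)
{
    split_cmdq_lines(450, 0x07);   /* example: TCs 0-2 active */
    return 0;
}
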
@@ -360,10 +369,11 @@ static void qed_tx_pq_map_rt_init(
memset(&tx_pq_map, 0, sizeof(tx_pq_map));
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID,
- is_vf_pq ? 1 : 0);
+ p_params->pq_params[i].rl_valid ? 1 : 0);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID,
- is_vf_pq ? p_params->pq_params[i].vport_id : 0);
+ p_params->pq_params[i].rl_valid ?
+ p_params->pq_params[i].vport_id : 0);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq);
SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP,
p_params->pq_params[i].wrr_group);
@@ -390,25 +400,11 @@ static void qed_tx_pq_map_rt_init(
/* store Tx PQ VF mask to size select register */
for (i = 0; i < num_tx_pq_vf_masks; i++) {
if (tx_pq_vf_mask[i]) {
- if (is_bb_a0) {
- u32 curr_mask = 0, addr;
-
- addr = QM_REG_MAXPQSIZETXSEL_0 + (i * 4);
- if (!p_params->is_first_pf)
- curr_mask = qed_rd(p_hwfn, p_ptt,
- addr);
-
- addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
-
- STORE_RT_REG(p_hwfn, addr,
- curr_mask | tx_pq_vf_mask[i]);
- } else {
- u32 addr;
+ u32 addr;
- addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
- STORE_RT_REG(p_hwfn, addr,
- tx_pq_vf_mask[i]);
- }
+ addr = QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i;
+ STORE_RT_REG(p_hwfn, addr,
+ tx_pq_vf_mask[i]);
}
}
}
@@ -418,8 +414,7 @@ static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
u8 port_id,
u8 pf_id,
u32 num_pf_cids,
- u32 num_tids,
- u32 base_mem_addr_4kb)
+ u32 num_tids, u32 base_mem_addr_4kb)
{
u16 i, pq_id;
@@ -465,15 +460,10 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
(p_params->pf_id % MAX_NUM_PFS_BB);
inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
- if (inc_val > QM_WFQ_MAX_INC_VAL) {
+ if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration");
return -1;
}
- STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
- inc_val);
- STORE_RT_REG(p_hwfn,
- QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
- QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
for (i = 0; i < num_tx_pqs; i++) {
u8 voq = VOQ(p_params->port_id, p_params->pq_params[i].tc_id,
@@ -481,19 +471,21 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
OVERWRITE_RT_REG(p_hwfn,
crd_reg_offset + voq * MAX_NUM_PFS_BB,
- QM_WFQ_INIT_CRD(inc_val) |
QM_WFQ_CRD_REG_SIGN_BIT);
}
+ STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
+ inc_val);
+ STORE_RT_REG(p_hwfn,
+ QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
+ QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT);
return 0;
}
/* Prepare PF RL runtime init values for the specified PF.
* Return -1 on error.
*/
-static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn,
- u8 pf_id,
- u32 pf_rl)
+static int qed_pf_rl_rt_init(struct qed_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
{
u32 inc_val = QM_RL_INC_VAL(pf_rl);
@@ -607,9 +599,7 @@ static bool qed_poll_on_qm_cmd_ready(struct qed_hwfn *p_hwfn,
static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u32 cmd_addr,
- u32 cmd_data_lsb,
- u32 cmd_data_msb)
+ u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb)
{
if (!qed_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
return false;
@@ -627,9 +617,7 @@ static bool qed_send_qm_cmd(struct qed_hwfn *p_hwfn,
u32 qed_qm_pf_mem_size(u8 pf_id,
u32 num_pf_cids,
u32 num_vf_cids,
- u32 num_tids,
- u16 num_pf_pqs,
- u16 num_vf_pqs)
+ u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs)
{
return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
@@ -712,10 +700,22 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
return 0;
}
+int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
+{
+ u32 inc_val = QM_WFQ_INC_VAL(pf_wfq);
+
+ if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration");
+ return -1;
+ }
+
+ qed_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
+ return 0;
+}
+
int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u8 pf_id,
- u32 pf_rl)
+ struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl)
{
u32 inc_val = QM_RL_INC_VAL(pf_rl);
@@ -732,10 +732,32 @@ int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
return 0;
}
+int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
+{
+ u32 inc_val = QM_WFQ_INC_VAL(vport_wfq);
+ u8 tc;
+
+ if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn, "Invalid VPORT WFQ weight configuration");
+ return -1;
+ }
+
+ for (tc = 0; tc < NUM_OF_TCS; tc++) {
+ u16 vport_pq_id = first_tx_pq_id[tc];
+
+ if (vport_pq_id != QM_INVALID_PQ_ID)
+ qed_wr(p_hwfn, p_ptt,
+ QM_REG_WFQVPWEIGHT + vport_pq_id * 4,
+ inc_val);
+ }
+
+ return 0;
+}
+
int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u8 vport_id,
- u32 vport_rl)
+ struct qed_ptt *p_ptt, u8 vport_id, u32 vport_rl)
{
u32 inc_val = QM_RL_INC_VAL(vport_rl);
@@ -755,9 +777,7 @@ int qed_init_vport_rl(struct qed_hwfn *p_hwfn,
bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
bool is_release_cmd,
- bool is_tx_pq,
- u16 start_pq,
- u16 num_pqs)
+ bool is_tx_pq, u16 start_pq, u16 num_pqs)
{
u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id;
@@ -788,3 +808,126 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
return true;
}
+
+static void
+qed_set_tunnel_type_enable_bit(unsigned long *var, int bit, bool enable)
+{
+ if (enable)
+ set_bit(bit, var);
+ else
+ clear_bit(bit, var);
+}
+
+#define PRS_ETH_TUNN_FIC_FORMAT -188897008
+
+void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 dest_port)
+{
+ qed_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
+ qed_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
+}
+
+void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, bool vxlan_enable)
+{
+ unsigned long reg_val = 0;
+ u8 shift;
+
+ reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+ shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
+ qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);
+
+ qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+
+ if (reg_val)
+ qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+ PRS_ETH_TUNN_FIC_FORMAT);
+
+ reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
+ shift = NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT;
+ qed_set_tunnel_type_enable_bit(&reg_val, shift, vxlan_enable);
+
+ qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
+
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
+ vxlan_enable ? 1 : 0);
+}
+
+void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ bool eth_gre_enable, bool ip_gre_enable)
+{
+ unsigned long reg_val = 0;
+ u8 shift;
+
+ reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+ shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT;
+ qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);
+
+ shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT;
+ qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+ if (reg_val)
+ qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+ PRS_ETH_TUNN_FIC_FORMAT);
+
+ reg_val = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
+ shift = NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT;
+ qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_gre_enable);
+
+ shift = NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT;
+ qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_gre_enable);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
+
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
+ eth_gre_enable ? 1 : 0);
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
+ ip_gre_enable ? 1 : 0);
+}
+
+void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 dest_port)
+{
+ qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
+ qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
+}
+
+void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ bool eth_geneve_enable, bool ip_geneve_enable)
+{
+ unsigned long reg_val = 0;
+ u8 shift;
+
+ reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
+ shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT;
+ qed_set_tunnel_type_enable_bit(&reg_val, shift, eth_geneve_enable);
+
+ shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT;
+ qed_set_tunnel_type_enable_bit(&reg_val, shift, ip_geneve_enable);
+
+ qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
+ if (reg_val)
+ qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
+ PRS_ETH_TUNN_FIC_FORMAT);
+
+ qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
+ eth_geneve_enable ? 1 : 0);
+ qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, ip_geneve_enable ? 1 : 0);
+
+ /* NGE (GENEVE) component version */
+ reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0;
+ qed_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val);
+ qed_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val);
+ qed_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val);
+
+ /* EDPM with geneve tunnel not supported in BB_B0 */
+ if (QED_IS_BB_B0(p_hwfn->cdev))
+ return;
+
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN,
+ eth_geneve_enable ? 1 : 0);
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN,
+ ip_geneve_enable ? 1 : 0);
+}
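
The new tunnel helpers share one pattern: read the encapsulation-enable register, toggle the bit for one tunnel type via qed_set_tunnel_type_enable_bit(), and write it back (plus the PRS output-format fixup when any type is on). A reduced sketch of that read-modify-write; register and shift names below are stand-ins for the PRS/NIG definitions:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define ENCAP_TYPE_EN_VXLAN_SHIFT    0
#define ENCAP_TYPE_EN_ETH_GRE_SHIFT  1
#define ENCAP_TYPE_EN_IP_GRE_SHIFT   2

static uint32_t encap_reg;  /* stands in for PRS_REG_ENCAPSULATION_TYPE_EN */

static void set_tunnel_type_enable_bit(uint32_t *var, int bit, bool enable)
{
    if (enable)
        *var |= 1u << bit;
    else
        *var &= ~(1u << bit);
}

static void set_vxlan_enable(bool enable)
{
    uint32_t reg_val = encap_reg;  /* qed_rd() in the real code */

    set_tunnel_type_enable_bit(&reg_val, ENCAP_TYPE_EN_VXLAN_SHIFT, enable);
    encap_reg = reg_val;           /* qed_wr() in the real code */
}

int main(void)
{
    set_vxlan_enable(true);
    printf("encap enable reg: 0x%x\n", encap_reg);
    set_vxlan_enable(false);
    printf("encap enable reg: 0x%x\n", encap_reg);
    return 0;
}
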
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index 3269b3610e03..9866a20d2128 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -18,6 +18,7 @@
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_reg_addr.h"
+#include "qed_sriov.h"
#define QED_INIT_MAX_POLL_COUNT 100
#define QED_INIT_POLL_PERIOD_US 500
@@ -128,6 +129,9 @@ int qed_init_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_rt_data *rt_data = &p_hwfn->rt_data;
+ if (IS_VF(p_hwfn->cdev))
+ return 0;
+
rt_data->b_valid = kzalloc(sizeof(bool) * RUNTIME_ARRAY_SIZE,
GFP_KERNEL);
if (!rt_data->b_valid)
@@ -539,8 +543,7 @@ void qed_gtt_init(struct qed_hwfn *p_hwfn)
pxp_global_win[i]);
}
-int qed_init_fw_data(struct qed_dev *cdev,
- const u8 *data)
+int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
{
struct qed_fw_data *fw = cdev->fw_data;
struct bin_buffer_hdr *buf_hdr;
@@ -551,7 +554,11 @@ int qed_init_fw_data(struct qed_dev *cdev,
return -EINVAL;
}
- buf_hdr = (struct bin_buffer_hdr *)data;
+ /* First Dword contains metadata and should be skipped */
+ buf_hdr = (struct bin_buffer_hdr *)(data + sizeof(u32));
+
+ offset = buf_hdr[BIN_BUF_FW_VER_INFO].offset;
+ fw->fw_ver_info = (struct fw_ver_info *)(data + offset);
offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
fw->init_ops = (union init_op *)(data + offset);
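
The qed_init_fw_data() change means the firmware blob now carries a leading metadata dword before the bin_buffer_hdr array, plus a firmware-version section indexed like the others. A sketch of walking such a blob; the section ids and header layout are simplified stand-ins:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct bin_buffer_hdr {
    uint32_t offset;
    uint32_t length;
};

enum { BUF_FW_VER_INFO, BUF_INIT_CMD, BUF_MAX };

static const uint8_t *section(const uint8_t *data, int id)
{
    /* the first dword is metadata; headers start right after it */
    const struct bin_buffer_hdr *hdrs =
        (const struct bin_buffer_hdr *)(data + sizeof(uint32_t));

    return data + hdrs[id].offset;
}

int main(void)
{
    uint8_t blob[64] = { 0 };
    struct bin_buffer_hdr hdrs[BUF_MAX] = { { 32, 8 }, { 40, 8 } };

    memcpy(blob + sizeof(uint32_t), hdrs, sizeof(hdrs));
    printf("fw_ver_info section at +%td\n",
           section(blob, BUF_FW_VER_INFO) - blob);
    return 0;
}
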
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index 2017b0121f5f..8fa50fa23c8d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -26,6 +26,8 @@
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
+#include "qed_sriov.h"
+#include "qed_vf.h"
struct qed_pi_info {
qed_int_comp_cb_t comp_cb;
@@ -2416,6 +2418,7 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
{
struct qed_dev *cdev = p_hwfn->cdev;
u32 cau_state;
+ u8 timer_res;
memset(p_sb_entry, 0, sizeof(*p_sb_entry));
@@ -2441,6 +2444,23 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
cdev->tx_coalesce_usecs = QED_CAU_DEF_TX_USECS;
}
+ /* Coalesce = (timeset << timer-res), timeset is 7 bits wide */
+ if (cdev->rx_coalesce_usecs <= 0x7F)
+ timer_res = 0;
+ else if (cdev->rx_coalesce_usecs <= 0xFF)
+ timer_res = 1;
+ else
+ timer_res = 2;
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
+
+ if (cdev->tx_coalesce_usecs <= 0x7F)
+ timer_res = 0;
+ else if (cdev->tx_coalesce_usecs <= 0xFF)
+ timer_res = 1;
+ else
+ timer_res = 2;
+ SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
+
SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
}
@@ -2482,17 +2502,28 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
/* Configure pi coalescing if set */
if (p_hwfn->cdev->int_coalescing_mode == QED_COAL_MODE_ENABLE) {
- u8 timeset = p_hwfn->cdev->rx_coalesce_usecs >>
- (QED_CAU_DEF_RX_TIMER_RES + 1);
+ u8 timeset, timer_res;
u8 num_tc = 1, i;
+ /* timeset = (coalesce >> timer-res), timeset is 7 bits wide */
+ if (p_hwfn->cdev->rx_coalesce_usecs <= 0x7F)
+ timer_res = 0;
+ else if (p_hwfn->cdev->rx_coalesce_usecs <= 0xFF)
+ timer_res = 1;
+ else
+ timer_res = 2;
+ timeset = (u8)(p_hwfn->cdev->rx_coalesce_usecs >> timer_res);
qed_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
QED_COAL_RX_STATE_MACHINE,
timeset);
- timeset = p_hwfn->cdev->tx_coalesce_usecs >>
- (QED_CAU_DEF_TX_TIMER_RES + 1);
-
+ if (p_hwfn->cdev->tx_coalesce_usecs <= 0x7F)
+ timer_res = 0;
+ else if (p_hwfn->cdev->tx_coalesce_usecs <= 0xFF)
+ timer_res = 1;
+ else
+ timer_res = 2;
+ timeset = (u8)(p_hwfn->cdev->tx_coalesce_usecs >> timer_res);
for (i = 0; i < num_tc; i++) {
qed_int_cau_conf_pi(p_hwfn, p_ptt,
igu_sb_id, TX_PI(i),
@@ -2513,6 +2544,9 @@ void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
u32 sb_offset;
u32 pi_offset;
+ if (IS_VF(p_hwfn->cdev))
+ return;
+
sb_offset = igu_sb_id * PIS_PER_SB;
memset(&pi_entry, 0, sizeof(struct cau_pi_entry));
@@ -2542,8 +2576,9 @@ void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
sb_info->sb_ack = 0;
memset(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
- qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
- sb_info->igu_sb_id, 0, 0);
+ if (IS_PF(p_hwfn->cdev))
+ qed_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
+ sb_info->igu_sb_id, 0, 0);
}
/**
@@ -2563,8 +2598,10 @@ static u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn,
/* Assuming continuous set of IGU SBs dedicated for given PF */
if (sb_id == QED_SP_SB_ID)
igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
- else
+ else if (IS_PF(p_hwfn->cdev))
igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb;
+ else
+ igu_sb_id = qed_vf_get_igu_sb_id(p_hwfn, sb_id);
DP_VERBOSE(p_hwfn, NETIF_MSG_INTR, "SB [%s] index is 0x%04x\n",
(sb_id == QED_SP_SB_ID) ? "DSB" : "non-DSB", igu_sb_id);
@@ -2594,9 +2631,16 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn,
/* The igu address will hold the absolute address that needs to be
* written to for a specific status block
*/
- sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
- GTT_BAR0_MAP_REG_IGU_CMD +
- (sb_info->igu_sb_id << 3);
+ if (IS_PF(p_hwfn->cdev)) {
+ sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
+ GTT_BAR0_MAP_REG_IGU_CMD +
+ (sb_info->igu_sb_id << 3);
+ } else {
+ sb_info->igu_addr = (u8 __iomem *)p_hwfn->regview +
+ PXP_VF_BAR0_START_IGU +
+ ((IGU_CMD_INT_ACK_BASE +
+ sb_info->igu_sb_id) << 3);
+ }
sb_info->flags |= QED_SB_INFO_INIT;
@@ -2783,24 +2827,20 @@ void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
{
p_hwfn->b_int_enabled = 0;
+ if (IS_VF(p_hwfn->cdev))
+ return;
+
qed_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
}
#define IGU_CLEANUP_SLEEP_LENGTH (1000)
-void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 sb_id,
- bool cleanup_set,
- u16 opaque_fid
- )
+static void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 sb_id, bool cleanup_set, u16 opaque_fid)
{
+ u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id;
u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
- u32 data = 0;
- u32 cmd_ctrl = 0;
- u32 val = 0;
- u32 sb_bit = 0;
- u32 sb_bit_addr = 0;
/* Set the data field */
SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
@@ -2845,11 +2885,9 @@ void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u32 sb_id,
- u16 opaque,
- bool b_set)
+ u32 sb_id, u16 opaque, bool b_set)
{
- int pi;
+ int pi, i;
/* Set */
if (b_set)
@@ -2858,6 +2896,22 @@ void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
/* Clear */
qed_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque);
+ /* Wait for the IGU SB to clean up */
+ for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
+ u32 val;
+
+ val = qed_rd(p_hwfn, p_ptt,
+ IGU_REG_WRITE_DONE_PENDING + ((sb_id / 32) * 4));
+ if (val & (1 << (sb_id % 32)))
+ usleep_range(10, 20);
+ else
+ break;
+ }
+ if (i == IGU_CLEANUP_SLEEP_LENGTH)
+ DP_NOTICE(p_hwfn,
+ "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n",
+ sb_id);
+
/* Clear the CAU for the SB */
for (pi = 0; pi < 12; pi++)
qed_wr(p_hwfn, p_ptt,
@@ -2866,13 +2920,11 @@ void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- bool b_set,
- bool b_slowpath)
+ bool b_set, bool b_slowpath)
{
u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb;
u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt;
- u32 sb_id = 0;
- u32 val = 0;
+ u32 sb_id = 0, val = 0;
val = qed_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
@@ -2888,14 +2940,14 @@ void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
p_hwfn->hw_info.opaque_fid,
b_set);
- if (b_slowpath) {
- sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
- DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
- "IGU cleaning slowpath SB [%d]\n", sb_id);
- qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
- p_hwfn->hw_info.opaque_fid,
- b_set);
- }
+ if (!b_slowpath)
+ return;
+
+ sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
+ DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
+ "IGU cleaning slowpath SB [%d]\n", sb_id);
+ qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
+ p_hwfn->hw_info.opaque_fid, b_set);
}
static u32 qed_int_igu_read_cam_block(struct qed_hwfn *p_hwfn,
@@ -2935,9 +2987,9 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
struct qed_igu_info *p_igu_info;
+ u32 val, min_vf = 0, max_vf = 0;
+ u16 sb_id, last_iov_sb_id = 0;
struct qed_igu_block *blk;
- u32 val;
- u16 sb_id;
u16 prev_sb_id = 0xFF;
p_hwfn->hw_info.p_igu_info = kzalloc(sizeof(*p_igu_info), GFP_KERNEL);
@@ -2947,12 +2999,19 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
p_igu_info = p_hwfn->hw_info.p_igu_info;
- /* Initialize base sb / sb cnt for PFs */
+ /* Initialize base sb / sb cnt for PFs and VFs */
p_igu_info->igu_base_sb = 0xffff;
p_igu_info->igu_sb_cnt = 0;
p_igu_info->igu_dsb_id = 0xffff;
p_igu_info->igu_base_sb_iov = 0xffff;
+ if (p_hwfn->cdev->p_iov_info) {
+ struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
+
+ min_vf = p_iov->first_vf_in_pf;
+ max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
+ }
+
for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
sb_id++) {
blk = &p_igu_info->igu_map.igu_blocks[sb_id];
@@ -2986,14 +3045,43 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
(p_igu_info->igu_sb_cnt)++;
}
}
+ } else {
+ if ((blk->function_id >= min_vf) &&
+ (blk->function_id < max_vf)) {
+ /* Available for VFs of this PF */
+ if (p_igu_info->igu_base_sb_iov == 0xffff) {
+ p_igu_info->igu_base_sb_iov = sb_id;
+ } else if (last_iov_sb_id != sb_id - 1) {
+ if (!val) {
+ DP_VERBOSE(p_hwfn->cdev,
+ NETIF_MSG_INTR,
+ "First uninitialized IGU CAM entry at index 0x%04x\n",
+ sb_id);
+ } else {
+ DP_NOTICE(p_hwfn->cdev,
+ "Consecutive igu vectors for HWFN %x vfs is broken [jumps from %04x to %04x]\n",
+ p_hwfn->rel_pf_id,
+ last_iov_sb_id,
+ sb_id);
+ }
+ break;
+ }
+ blk->status |= QED_IGU_STATUS_FREE;
+ p_hwfn->hw_info.p_igu_info->free_blks++;
+ last_iov_sb_id = sb_id;
+ }
}
}
-
- DP_VERBOSE(p_hwfn, NETIF_MSG_INTR,
- "IGU igu_base_sb=0x%x igu_sb_cnt=%d igu_dsb_id=0x%x\n",
- p_igu_info->igu_base_sb,
- p_igu_info->igu_sb_cnt,
- p_igu_info->igu_dsb_id);
+ p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks;
+
+ DP_VERBOSE(
+ p_hwfn,
+ NETIF_MSG_INTR,
+ "IGU igu_base_sb=0x%x [IOV 0x%x] igu_sb_cnt=%d [IOV 0x%x] igu_dsb_id=0x%x\n",
+ p_igu_info->igu_base_sb,
+ p_igu_info->igu_base_sb_iov,
+ p_igu_info->igu_sb_cnt,
+ p_igu_info->igu_sb_cnt_iov,
+ p_igu_info->igu_dsb_id);
if (p_igu_info->igu_base_sb == 0xffff ||
p_igu_info->igu_dsb_id == 0xffff ||
@@ -3116,6 +3204,23 @@ void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
p_sb_cnt_info->sb_free_blk = info->free_blks;
}
+u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
+{
+ struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
+
+ /* Determine origin of SB id */
+ if ((sb_id >= p_info->igu_base_sb) &&
+ (sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) {
+ return sb_id - p_info->igu_base_sb;
+ } else if ((sb_id >= p_info->igu_base_sb_iov) &&
+ (sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) {
+ return sb_id - p_info->igu_base_sb_iov + p_info->igu_sb_cnt;
+ } else {
+ DP_NOTICE(p_hwfn, "SB %d not in range for function\n", sb_id);
+ return 0;
+ }
+}
+
void qed_int_disable_post_isr_release(struct qed_dev *cdev)
{
int i;
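
qed_int_queue_id_from_sb_id() relies on the PF's own SBs and its VFs' SBs forming two contiguous IGU ranges; queue ids are handed out for the PF range first, with the VF range appended after it. A sketch with example base/count values:

#include <stdio.h>
#include <stdint.h>

#define IGU_BASE_SB      16  /* first PF SB (example) */
#define IGU_SB_CNT       8
#define IGU_BASE_SB_IOV  64  /* first VF SB (example) */
#define IGU_SB_CNT_IOV   16

static uint16_t queue_id_from_sb_id(uint16_t sb_id)
{
    if (sb_id >= IGU_BASE_SB && sb_id < IGU_BASE_SB + IGU_SB_CNT)
        return sb_id - IGU_BASE_SB;
    if (sb_id >= IGU_BASE_SB_IOV &&
        sb_id < IGU_BASE_SB_IOV + IGU_SB_CNT_IOV)
        return sb_id - IGU_BASE_SB_IOV + IGU_SB_CNT;
    return 0;  /* out of range for this function */
}

int main(void)
{
    printf("SB 18 -> queue %u\n", queue_id_from_sb_id(18));
    printf("SB 66 -> queue %u\n", queue_id_from_sb_id(66));
    return 0;
}
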
@@ -3123,3 +3228,39 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev)
for_each_hwfn(cdev, i)
cdev->hwfns[i].b_int_requested = false;
}
+
+int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u8 timer_res, u16 sb_id, bool tx)
+{
+ struct cau_sb_entry sb_entry;
+ int rc;
+
+ if (!p_hwfn->hw_init_done) {
+ DP_ERR(p_hwfn, "hardware not initialized yet\n");
+ return -EINVAL;
+ }
+
+ rc = qed_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
+ sb_id * sizeof(u64),
+ (u64)(uintptr_t)&sb_entry, 2, 0);
+ if (rc) {
+ DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
+ return rc;
+ }
+
+ if (tx)
+ SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
+ else
+ SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
+
+ rc = qed_dmae_host2grc(p_hwfn, p_ptt,
+ (u64)(uintptr_t)&sb_entry,
+ CAU_REG_SB_VAR_MEMORY +
+ sb_id * sizeof(u64), 2, 0);
+ if (rc) {
+ DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
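
qed_int_set_timer_res() rewrites a single CAU SB entry over DMAE rather than rebuilding the whole SB, and pairs with the coalescing code earlier in this file: the hardware stores coalesce = timeset << timer_res with a 7-bit timeset, so the driver picks the smallest resolution that makes the microsecond value fit. A sketch of that split:

#include <stdio.h>
#include <stdint.h>

/* coalesce = timeset << timer_res, timeset is 7 bits wide */
static void coalesce_to_cau(uint16_t usecs)
{
    uint8_t timer_res, timeset;

    if (usecs <= 0x7F)
        timer_res = 0;
    else if (usecs <= 0xFF)
        timer_res = 1;
    else
        timer_res = 2;

    timeset = (uint8_t)(usecs >> timer_res);
    printf("%u us -> timer_res=%u timeset=0x%02x (programs %u us)\n",
           usecs, timer_res, timeset, timeset << timer_res);
}

int main(void)
{
    coalesce_to_cau(24);    /* fits the 7-bit field directly */
    coalesce_to_cau(192);   /* needs resolution 1 */
    coalesce_to_cau(385);   /* resolution 2; the low bits are dropped */
    return 0;
}
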
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index c57f2e680770..0948be64dc78 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -20,6 +20,12 @@
#define IGU_PF_CONF_ATTN_BIT_EN (0x1 << 3) /* attention enable */
#define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */
#define IGU_PF_CONF_SIMD_MODE (0x1 << 5) /* simd all ones mode */
+/* Fields of IGU VF CONFIGURATION REGISTER */
+#define IGU_VF_CONF_FUNC_EN (0x1 << 0) /* function enable */
+#define IGU_VF_CONF_MSI_MSIX_EN (0x1 << 1) /* MSI/MSIX enable */
+#define IGU_VF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */
+#define IGU_VF_CONF_PARENT_MASK (0xF) /* Parent PF */
+#define IGU_VF_CONF_PARENT_SHIFT 5 /* Parent PF */
/* Igu control commands
*/
@@ -292,26 +298,8 @@ u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn);
* @param p_hwfn
* @param p_ptt
* @param sb_id - igu status block id
- * @param cleanup_set - set(1) / clear(0)
- * @param opaque_fid - the function for which to perform
- * cleanup, for example a PF on behalf of
- * its VFs.
- */
-void qed_int_igu_cleanup_sb(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 sb_id,
- bool cleanup_set,
- u16 opaque_fid);
-
-/**
- * @brief Status block cleanup. Should be called for each status
- * block that will be used -> both PF / VF
- *
- * @param p_hwfn
- * @param p_ptt
- * @param sb_id - igu status block id
* @param opaque - opaque fid of the sb owner.
- * @param cleanup_set - set(1) / clear(0)
+ * @param b_set - set(1) / clear(0)
*/
void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -365,6 +353,16 @@ void qed_int_setup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
+ * @brief - Returns an Rx queue index appropriate for usage with given SB.
+ *
+ * @param p_hwfn
+ * @param sb_id - absolute index of SB
+ *
+ * @return index of Rx queue
+ */
+u16 qed_int_queue_id_from_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
+
+/**
* @brief - Enable Interrupt & Attention for hw function
*
* @param p_hwfn
@@ -391,6 +389,9 @@ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
u16 vf_number,
u8 vf_valid);
+int qed_int_set_timer_res(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u8 timer_res, u16 sb_id, bool tx);
+
#define QED_MAPPING_MEMORY_SIZE(dev) (NUM_OF_SBS(dev))
#endif
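
The IGU_VF_CONF_* additions describe the VF configuration register: enable bits in the low nibble and the parent PF id at bits 5..8 (4-bit mask, shift 5). A sketch composing a value from exactly these definitions; the parent PF number is an example:

#include <stdio.h>
#include <stdint.h>

#define IGU_VF_CONF_FUNC_EN       (0x1 << 0)
#define IGU_VF_CONF_MSI_MSIX_EN   (0x1 << 1)
#define IGU_VF_CONF_SINGLE_ISR_EN (0x1 << 4)
#define IGU_VF_CONF_PARENT_MASK   (0xF)
#define IGU_VF_CONF_PARENT_SHIFT  5

int main(void)
{
    uint8_t parent_pf = 2;   /* example parent PF */
    uint32_t val = IGU_VF_CONF_FUNC_EN | IGU_VF_CONF_MSI_MSIX_EN;

    val |= (uint32_t)(parent_pf & IGU_VF_CONF_PARENT_MASK)
           << IGU_VF_CONF_PARENT_SHIFT;
    printf("IGU VF configuration: 0x%x\n", val);
    return 0;
}
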
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 3f35c6ca9252..401e738543b5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -31,137 +31,25 @@
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
+#include "qed_l2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
+#include "qed_sriov.h"
-enum qed_rss_caps {
- QED_RSS_IPV4 = 0x1,
- QED_RSS_IPV6 = 0x2,
- QED_RSS_IPV4_TCP = 0x4,
- QED_RSS_IPV6_TCP = 0x8,
- QED_RSS_IPV4_UDP = 0x10,
- QED_RSS_IPV6_UDP = 0x20,
-};
-
-/* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */
-#define QED_RSS_IND_TABLE_SIZE 128
-#define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */
-
-struct qed_rss_params {
- u8 update_rss_config;
- u8 rss_enable;
- u8 rss_eng_id;
- u8 update_rss_capabilities;
- u8 update_rss_ind_table;
- u8 update_rss_key;
- u8 rss_caps;
- u8 rss_table_size_log;
- u16 rss_ind_table[QED_RSS_IND_TABLE_SIZE];
- u32 rss_key[QED_RSS_KEY_SIZE];
-};
-
-enum qed_filter_opcode {
- QED_FILTER_ADD,
- QED_FILTER_REMOVE,
- QED_FILTER_MOVE,
- QED_FILTER_REPLACE, /* Delete all MACs and add new one instead */
- QED_FILTER_FLUSH, /* Removes all filters */
-};
-
-enum qed_filter_ucast_type {
- QED_FILTER_MAC,
- QED_FILTER_VLAN,
- QED_FILTER_MAC_VLAN,
- QED_FILTER_INNER_MAC,
- QED_FILTER_INNER_VLAN,
- QED_FILTER_INNER_PAIR,
- QED_FILTER_INNER_MAC_VNI_PAIR,
- QED_FILTER_MAC_VNI_PAIR,
- QED_FILTER_VNI,
-};
-
-struct qed_filter_ucast {
- enum qed_filter_opcode opcode;
- enum qed_filter_ucast_type type;
- u8 is_rx_filter;
- u8 is_tx_filter;
- u8 vport_to_add_to;
- u8 vport_to_remove_from;
- unsigned char mac[ETH_ALEN];
- u8 assert_on_error;
- u16 vlan;
- u32 vni;
-};
-
-struct qed_filter_mcast {
- /* MOVE is not supported for multicast */
- enum qed_filter_opcode opcode;
- u8 vport_to_add_to;
- u8 vport_to_remove_from;
- u8 num_mc_addrs;
-#define QED_MAX_MC_ADDRS 64
- unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN];
-};
-
-struct qed_filter_accept_flags {
- u8 update_rx_mode_config;
- u8 update_tx_mode_config;
- u8 rx_accept_filter;
- u8 tx_accept_filter;
-#define QED_ACCEPT_NONE 0x01
-#define QED_ACCEPT_UCAST_MATCHED 0x02
-#define QED_ACCEPT_UCAST_UNMATCHED 0x04
-#define QED_ACCEPT_MCAST_MATCHED 0x08
-#define QED_ACCEPT_MCAST_UNMATCHED 0x10
-#define QED_ACCEPT_BCAST 0x20
-};
-
-struct qed_sp_vport_update_params {
- u16 opaque_fid;
- u8 vport_id;
- u8 update_vport_active_rx_flg;
- u8 vport_active_rx_flg;
- u8 update_vport_active_tx_flg;
- u8 vport_active_tx_flg;
- u8 update_approx_mcast_flg;
- u8 update_accept_any_vlan_flg;
- u8 accept_any_vlan;
- unsigned long bins[8];
- struct qed_rss_params *rss_params;
- struct qed_filter_accept_flags accept_flags;
-};
-
-enum qed_tpa_mode {
- QED_TPA_MODE_NONE,
- QED_TPA_MODE_UNUSED,
- QED_TPA_MODE_GRO,
- QED_TPA_MODE_MAX
-};
-
-struct qed_sp_vport_start_params {
- enum qed_tpa_mode tpa_mode;
- bool remove_inner_vlan;
- bool drop_ttl0;
- u8 max_buffers_per_cqe;
- u32 concrete_fid;
- u16 opaque_fid;
- u8 vport_id;
- u16 mtu;
-};
#define QED_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41
-static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
- struct qed_sp_vport_start_params *p_params)
+int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_start_params *p_params)
{
struct vport_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
+ u8 abs_vport_id = 0;
int rc = -EINVAL;
u16 rx_mode = 0;
- u8 abs_vport_id = 0;
rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
if (rc != 0)
@@ -184,6 +72,7 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
p_ramrod->mtu = cpu_to_le16(p_params->mtu);
p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
+ p_ramrod->untagged = p_params->only_untagged;
SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);
@@ -211,6 +100,8 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
break;
}
+ p_ramrod->tx_switching_en = p_params->tx_switching;
+
/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
p_ramrod->sw_fid = qed_concrete_to_sw_fid(p_hwfn->cdev,
p_params->concrete_fid);
@@ -218,6 +109,21 @@ static int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
return qed_spq_post(p_hwfn, p_ent, NULL);
}
+int qed_sp_vport_start(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_start_params *p_params)
+{
+ if (IS_VF(p_hwfn->cdev)) {
+ return qed_vf_pf_vport_start(p_hwfn, p_params->vport_id,
+ p_params->mtu,
+ p_params->remove_inner_vlan,
+ p_params->tpa_mode,
+ p_params->max_buffers_per_cqe,
+ p_params->only_untagged);
+ }
+
+ return qed_sp_eth_vport_start(p_hwfn, p_params);
+}
+
static int
qed_sp_vport_update_rss(struct qed_hwfn *p_hwfn,
struct vport_update_ramrod_data *p_ramrod,
@@ -342,10 +248,6 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
!!(accept_filter & QED_ACCEPT_NONE));
- SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
- (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
- !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
-
SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
!!(accept_filter & QED_ACCEPT_NONE));
@@ -363,6 +265,38 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
}
static void
+qed_sp_vport_update_sge_tpa(struct qed_hwfn *p_hwfn,
+ struct vport_update_ramrod_data *p_ramrod,
+ struct qed_sge_tpa_params *p_params)
+{
+ struct eth_vport_tpa_param *p_tpa;
+
+ if (!p_params) {
+ p_ramrod->common.update_tpa_param_flg = 0;
+ p_ramrod->common.update_tpa_en_flg = 0;
+ return;
+ }
+
+ p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
+ p_tpa = &p_ramrod->tpa_param;
+ p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
+ p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
+ p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
+ p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;
+
+ p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
+ p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
+ p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
+ p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
+ p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
+ p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
+ p_tpa->tpa_max_size = p_params->tpa_max_size;
+ p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
+ p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
+}
+
+static void
qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
struct vport_update_ramrod_data *p_ramrod,
struct qed_sp_vport_update_params *p_params)
@@ -383,20 +317,24 @@ qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
}
}
-static int
-qed_sp_vport_update(struct qed_hwfn *p_hwfn,
- struct qed_sp_vport_update_params *p_params,
- enum spq_mode comp_mode,
- struct qed_spq_comp_cb *p_comp_data)
+int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_params,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
{
struct qed_rss_params *p_rss_params = p_params->rss_params;
struct vport_update_ramrod_data_cmn *p_cmn;
struct qed_sp_init_data init_data;
struct vport_update_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
- u8 abs_vport_id = 0;
+ u8 abs_vport_id = 0, val;
int rc = -EINVAL;
+ if (IS_VF(p_hwfn->cdev)) {
+ rc = qed_vf_pf_vport_update(p_hwfn, p_params);
+ return rc;
+ }
+
rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
if (rc != 0)
return rc;
@@ -425,6 +363,27 @@ qed_sp_vport_update(struct qed_hwfn *p_hwfn,
p_cmn->accept_any_vlan = p_params->accept_any_vlan;
p_cmn->update_accept_any_vlan_flg =
p_params->update_accept_any_vlan_flg;
+
+ p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
+ val = p_params->update_inner_vlan_removal_flg;
+ p_cmn->update_inner_vlan_removal_en_flg = val;
+
+ p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
+ val = p_params->update_default_vlan_enable_flg;
+ p_cmn->update_default_vlan_en_flg = val;
+
+ p_cmn->default_vlan = cpu_to_le16(p_params->default_vlan);
+ p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;
+
+ p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;
+
+ p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;
+ p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;
+
+ p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
+ val = p_params->update_anti_spoofing_en_flg;
+ p_ramrod->common.update_anti_spoofing_en_flg = val;
+
rc = qed_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
if (rc) {
/* Return spq entry which is taken in qed_sp_init_request()*/
@@ -436,12 +395,11 @@ qed_sp_vport_update(struct qed_hwfn *p_hwfn,
qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
qed_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
+ qed_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, p_params->sge_tpa_params);
return qed_spq_post(p_hwfn, p_ent, NULL);
}
-static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
- u16 opaque_fid,
- u8 vport_id)
+int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id)
{
struct vport_stop_ramrod_data *p_ramrod;
struct qed_sp_init_data init_data;
@@ -449,6 +407,9 @@ static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
u8 abs_vport_id = 0;
int rc;
+ if (IS_VF(p_hwfn->cdev))
+ return qed_vf_pf_vport_stop(p_hwfn);
+
rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
if (rc != 0)
return rc;
@@ -470,13 +431,26 @@ static int qed_sp_vport_stop(struct qed_hwfn *p_hwfn,
return qed_spq_post(p_hwfn, p_ent, NULL);
}
+static int
+qed_vf_pf_accept_flags(struct qed_hwfn *p_hwfn,
+ struct qed_filter_accept_flags *p_accept_flags)
+{
+ struct qed_sp_vport_update_params s_params;
+
+ memset(&s_params, 0, sizeof(s_params));
+ memcpy(&s_params.accept_flags, p_accept_flags,
+ sizeof(struct qed_filter_accept_flags));
+
+ return qed_vf_pf_vport_update(p_hwfn, &s_params);
+}
+
static int qed_filter_accept_cmd(struct qed_dev *cdev,
u8 vport,
struct qed_filter_accept_flags accept_flags,
u8 update_accept_any_vlan,
u8 accept_any_vlan,
- enum spq_mode comp_mode,
- struct qed_spq_comp_cb *p_comp_data)
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
{
struct qed_sp_vport_update_params vport_update_params;
int i, rc;
@@ -493,6 +467,13 @@ static int qed_filter_accept_cmd(struct qed_dev *cdev,
vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ if (IS_VF(cdev)) {
+ rc = qed_vf_pf_accept_flags(p_hwfn, &accept_flags);
+ if (rc)
+ return rc;
+ continue;
+ }
+
rc = qed_sp_vport_update(p_hwfn, &vport_update_params,
comp_mode, p_comp_data);
if (rc != 0) {
@@ -527,16 +508,14 @@ static int qed_sp_release_queue_cid(
return 0;
}
-static int
-qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
- u16 opaque_fid,
- u32 cid,
- struct qed_queue_start_common_params *params,
- u8 stats_id,
- u16 bd_max_bytes,
- dma_addr_t bd_chain_phys_addr,
- dma_addr_t cqe_pbl_addr,
- u16 cqe_pbl_size)
+int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u32 cid,
+ struct qed_queue_start_common_params *params,
+ u8 stats_id,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size)
{
struct rx_queue_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
@@ -593,9 +572,12 @@ qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
p_ramrod->num_of_pbl_pages = cpu_to_le16(cqe_pbl_size);
DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);
- rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ p_ramrod->vf_rx_prod_index = params->vf_qid;
+ if (params->vf_qid)
+ DP_VERBOSE(p_hwfn, QED_MSG_SP,
+ "Queue is meant for VF rxq[%04x]\n", params->vf_qid);
- return rc;
+ return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int
@@ -605,15 +587,24 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr,
- u16 cqe_pbl_size,
- void __iomem **pp_prod)
+ u16 cqe_pbl_size, void __iomem **pp_prod)
{
struct qed_hw_cid_data *p_rx_cid;
- u64 init_prod_val = 0;
+ u32 init_prod_val = 0;
u16 abs_l2_queue = 0;
u8 abs_stats_id = 0;
int rc;
+ if (IS_VF(p_hwfn->cdev)) {
+ return qed_vf_pf_rxq_start(p_hwfn,
+ params->queue_id,
+ params->sb,
+ params->sb_idx,
+ bd_max_bytes,
+ bd_chain_phys_addr,
+ cqe_pbl_addr, cqe_pbl_size, pp_prod);
+ }
+
rc = qed_fw_l2_queue(p_hwfn, params->queue_id, &abs_l2_queue);
if (rc != 0)
return rc;
@@ -624,10 +615,10 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
*pp_prod = (u8 __iomem *)p_hwfn->regview +
GTT_BAR0_MAP_REG_MSDM_RAM +
- MSTORM_PRODS_OFFSET(abs_l2_queue);
+ MSTORM_ETH_PF_PRODS_OFFSET(abs_l2_queue);
/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
- __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64),
+ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
(u32 *)(&init_prod_val));
/* Allocate a CID for the queue */
@@ -656,10 +647,59 @@ qed_sp_eth_rx_queue_start(struct qed_hwfn *p_hwfn,
return rc;
}
-static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
- u16 rx_queue_id,
- bool eq_completion_only,
- bool cqe_completion)
+int qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
+ u16 rx_queue_id,
+ u8 num_rxqs,
+ u8 complete_cqe_flg,
+ u8 complete_event_flg,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ struct rx_queue_update_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ struct qed_hw_cid_data *p_rx_cid;
+ u16 qid, abs_rx_q_id = 0;
+ int rc = -EINVAL;
+ u8 i;
+
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_data;
+
+ for (i = 0; i < num_rxqs; i++) {
+ qid = rx_queue_id + i;
+ p_rx_cid = &p_hwfn->p_rx_cids[qid];
+
+ /* Get SPQ entry */
+ init_data.cid = p_rx_cid->cid;
+ init_data.opaque_fid = p_rx_cid->opaque_fid;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ ETH_RAMROD_RX_QUEUE_UPDATE,
+ PROTOCOLID_ETH, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.rx_queue_update;
+
+ qed_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id);
+ qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
+ p_ramrod->rx_queue_id = cpu_to_le16(abs_rx_q_id);
+ p_ramrod->complete_cqe_flg = complete_cqe_flg;
+ p_ramrod->complete_event_flg = complete_event_flg;
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ return rc;
+ }
+
+ return rc;
+}
+
+int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
+ u16 rx_queue_id,
+ bool eq_completion_only, bool cqe_completion)
{
struct qed_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id];
struct rx_queue_stop_ramrod_data *p_ramrod = NULL;
@@ -668,6 +708,9 @@ static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
u16 abs_rx_q_id = 0;
int rc = -EINVAL;
+ if (IS_VF(p_hwfn->cdev))
+ return qed_vf_pf_rxq_stop(p_hwfn, rx_queue_id, cqe_completion);
+
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
init_data.cid = p_rx_cid->cid;
@@ -703,23 +746,22 @@ static int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
return qed_sp_release_queue_cid(p_hwfn, p_rx_cid);
}
-static int
-qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
- u16 opaque_fid,
- u32 cid,
- struct qed_queue_start_common_params *p_params,
- u8 stats_id,
- dma_addr_t pbl_addr,
- u16 pbl_size,
- union qed_qm_pq_params *p_pq_params)
+int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u32 cid,
+ struct qed_queue_start_common_params *p_params,
+ u8 stats_id,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ union qed_qm_pq_params *p_pq_params)
{
struct tx_queue_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
struct qed_hw_cid_data *p_tx_cid;
- u8 abs_vport_id;
+ u16 pq_id, abs_tx_q_id = 0;
int rc = -EINVAL;
- u16 pq_id;
+ u8 abs_vport_id;
/* Store information for the stop */
p_tx_cid = &p_hwfn->p_tx_cids[p_params->queue_id];
@@ -730,6 +772,10 @@ qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
+ rc = qed_fw_l2_queue(p_hwfn, p_params->queue_id, &abs_tx_q_id);
+ if (rc)
+ return rc;
+
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
init_data.cid = cid;
@@ -749,6 +795,7 @@ qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
p_ramrod->sb_index = p_params->sb_idx;
p_ramrod->stats_counter_id = stats_id;
+ p_ramrod->queue_zone_id = cpu_to_le16(abs_tx_q_id);
p_ramrod->pbl_size = cpu_to_le16(pbl_size);
DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);
@@ -765,14 +812,21 @@ qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
u16 opaque_fid,
struct qed_queue_start_common_params *p_params,
dma_addr_t pbl_addr,
- u16 pbl_size,
- void __iomem **pp_doorbell)
+ u16 pbl_size, void __iomem **pp_doorbell)
{
struct qed_hw_cid_data *p_tx_cid;
union qed_qm_pq_params pq_params;
u8 abs_stats_id = 0;
int rc;
+ if (IS_VF(p_hwfn->cdev)) {
+ return qed_vf_pf_txq_start(p_hwfn,
+ p_params->queue_id,
+ p_params->sb,
+ p_params->sb_idx,
+ pbl_addr, pbl_size, pp_doorbell);
+ }
+
rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_stats_id);
if (rc)
return rc;
@@ -813,14 +867,16 @@ qed_sp_eth_tx_queue_start(struct qed_hwfn *p_hwfn,
return rc;
}
-static int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn,
- u16 tx_queue_id)
+int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id)
{
struct qed_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id];
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = -EINVAL;
+ if (IS_VF(p_hwfn->cdev))
+ return qed_vf_pf_txq_stop(p_hwfn, tx_queue_id);
+
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
init_data.cid = p_tx_cid->cid;
@@ -1016,11 +1072,11 @@ qed_filter_ucast_common(struct qed_hwfn *p_hwfn,
return 0;
}
-static int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
- u16 opaque_fid,
- struct qed_filter_ucast *p_filter_cmd,
- enum spq_mode comp_mode,
- struct qed_spq_comp_cb *p_comp_data)
+int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_filter_ucast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
{
struct vport_filter_update_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
@@ -1118,7 +1174,7 @@ static inline u32 qed_crc32c_le(u32 seed,
return qed_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
}
-static u8 qed_mcast_bin_from_mac(u8 *mac)
+u8 qed_mcast_bin_from_mac(u8 *mac)
{
u32 crc = qed_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
mac, ETH_ALEN);
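
The newly-exported qed_mcast_bin_from_mac() implements approximate
multicast filtering: the MAC is hashed with CRC32C and the low bits of
the digest select a bin in the vport's bins[] bitmap. A minimal
user-space model of the scheme, assuming a 256-bin map and a plain
CRC32C in place of the driver's seeded qed_calc_crc32c():

#include <stdint.h>
#include <stdio.h>

static uint32_t crc32c(const uint8_t *buf, int len)
{
	uint32_t crc = 0xffffffff;
	int i, j;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];
		for (j = 0; j < 8; j++)
			crc = (crc >> 1) ^ (0x82f63b78 & -(crc & 1));
	}
	return ~crc;
}

int main(void)
{
	uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t bins[8] = { 0 };		/* 256 bins */
	uint8_t bin = crc32c(mac, 6) & 0xff;	/* low 8 bits pick the bin */

	bins[bin / 32] |= 1u << (bin % 32);
	printf("MAC hashes to bin %u\n", bin);
	return 0;
}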
@@ -1201,11 +1257,10 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
return qed_spq_post(p_hwfn, p_ent, NULL);
}
-static int
-qed_filter_mcast_cmd(struct qed_dev *cdev,
- struct qed_filter_mcast *p_filter_cmd,
- enum spq_mode comp_mode,
- struct qed_spq_comp_cb *p_comp_data)
+static int qed_filter_mcast_cmd(struct qed_dev *cdev,
+ struct qed_filter_mcast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
{
int rc = 0;
int i;
@@ -1221,8 +1276,10 @@ qed_filter_mcast_cmd(struct qed_dev *cdev,
u16 opaque_fid;
- if (rc != 0)
- break;
+ if (IS_VF(cdev)) {
+ qed_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
+ continue;
+ }
opaque_fid = p_hwfn->hw_info.opaque_fid;
@@ -1247,8 +1304,10 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev,
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
u16 opaque_fid;
- if (rc != 0)
- break;
+ if (IS_VF(cdev)) {
+ rc = qed_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
+ continue;
+ }
opaque_fid = p_hwfn->hw_info.opaque_fid;
@@ -1257,6 +1316,8 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev,
p_filter_cmd,
comp_mode,
p_comp_data);
+ if (rc != 0)
+ break;
}
return rc;
@@ -1265,12 +1326,19 @@ static int qed_filter_ucast_cmd(struct qed_dev *cdev,
/* Statistics related code */
static void __qed_get_vport_pstats_addrlen(struct qed_hwfn *p_hwfn,
u32 *p_addr,
- u32 *p_len,
- u16 statistics_bin)
+ u32 *p_len, u16 statistics_bin)
{
- *p_addr = BAR0_MAP_REG_PSDM_RAM +
- PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
- *p_len = sizeof(struct eth_pstorm_per_queue_stat);
+ if (IS_PF(p_hwfn->cdev)) {
+ *p_addr = BAR0_MAP_REG_PSDM_RAM +
+ PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
+ *p_len = sizeof(struct eth_pstorm_per_queue_stat);
+ } else {
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+ *p_addr = p_resp->pfdev_info.stats_info.pstats.address;
+ *p_len = p_resp->pfdev_info.stats_info.pstats.len;
+ }
}
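
The PF/VF split introduced here repeats in the tstats/ustats/mstats
helpers below: the PF computes a fixed storm-RAM offset, while the VF
has no view of the RAM layout and reuses the address and length the PF
advertised in its acquire response. A simplified stand-in, with
illustrative (not driver) types:

#include <stdint.h>

struct stats_loc {			/* models stats_info.pstats */
	uint32_t address;
	uint32_t len;
};

static void get_stats_loc(int is_pf, uint32_t storm_ram_base,
			  uint32_t queue_off, uint32_t queue_len,
			  const struct stats_loc *acquire_resp,
			  uint32_t *addr, uint32_t *len)
{
	if (is_pf) {
		*addr = storm_ram_base + queue_off;	/* fixed BAR0 offset */
		*len = queue_len;
	} else {
		*addr = acquire_resp->address;	/* PF-advertised location */
		*len = acquire_resp->len;
	}
}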
static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
@@ -1285,32 +1353,15 @@ static void __qed_get_vport_pstats(struct qed_hwfn *p_hwfn,
statistics_bin);
memset(&pstats, 0, sizeof(pstats));
- qed_memcpy_from(p_hwfn, p_ptt, &pstats,
- pstats_addr, pstats_len);
-
- p_stats->tx_ucast_bytes +=
- HILO_64_REGPAIR(pstats.sent_ucast_bytes);
- p_stats->tx_mcast_bytes +=
- HILO_64_REGPAIR(pstats.sent_mcast_bytes);
- p_stats->tx_bcast_bytes +=
- HILO_64_REGPAIR(pstats.sent_bcast_bytes);
- p_stats->tx_ucast_pkts +=
- HILO_64_REGPAIR(pstats.sent_ucast_pkts);
- p_stats->tx_mcast_pkts +=
- HILO_64_REGPAIR(pstats.sent_mcast_pkts);
- p_stats->tx_bcast_pkts +=
- HILO_64_REGPAIR(pstats.sent_bcast_pkts);
- p_stats->tx_err_drop_pkts +=
- HILO_64_REGPAIR(pstats.error_drop_pkts);
-}
-
-static void __qed_get_vport_tstats_addrlen(struct qed_hwfn *p_hwfn,
- u32 *p_addr,
- u32 *p_len)
-{
- *p_addr = BAR0_MAP_REG_TSDM_RAM +
- TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
- *p_len = sizeof(struct tstorm_per_port_stat);
+ qed_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
+
+ p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
+ p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
+ p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
+ p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
+ p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
+ p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
+ p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts);
}
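
The reflowed accumulation relies on HILO_64_REGPAIR(), which folds a
firmware {hi, lo} 32-bit register pair into one 64-bit counter before
it is added to the running totals. A sketch with an illustrative
struct layout:

#include <stdint.h>

struct regpair {	/* illustrative hi/lo pair */
	uint32_t lo;
	uint32_t hi;
};

static inline uint64_t hilo_64_regpair(struct regpair r)
{
	return ((uint64_t)r.hi << 32) | r.lo;
}

/* e.g. { .hi = 0x1, .lo = 0x2 } -> 0x100000002 */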
static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
@@ -1318,14 +1369,23 @@ static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
struct qed_eth_stats *p_stats,
u16 statistics_bin)
{
- u32 tstats_addr = 0, tstats_len = 0;
struct tstorm_per_port_stat tstats;
+ u32 tstats_addr, tstats_len;
- __qed_get_vport_tstats_addrlen(p_hwfn, &tstats_addr, &tstats_len);
+ if (IS_PF(p_hwfn->cdev)) {
+ tstats_addr = BAR0_MAP_REG_TSDM_RAM +
+ TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
+ tstats_len = sizeof(struct tstorm_per_port_stat);
+ } else {
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+ tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
+ tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
+ }
memset(&tstats, 0, sizeof(tstats));
- qed_memcpy_from(p_hwfn, p_ptt, &tstats,
- tstats_addr, tstats_len);
+ qed_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
p_stats->mftag_filter_discards +=
HILO_64_REGPAIR(tstats.mftag_filter_discard);
@@ -1335,12 +1395,19 @@ static void __qed_get_vport_tstats(struct qed_hwfn *p_hwfn,
static void __qed_get_vport_ustats_addrlen(struct qed_hwfn *p_hwfn,
u32 *p_addr,
- u32 *p_len,
- u16 statistics_bin)
+ u32 *p_len, u16 statistics_bin)
{
- *p_addr = BAR0_MAP_REG_USDM_RAM +
- USTORM_QUEUE_STAT_OFFSET(statistics_bin);
- *p_len = sizeof(struct eth_ustorm_per_queue_stat);
+ if (IS_PF(p_hwfn->cdev)) {
+ *p_addr = BAR0_MAP_REG_USDM_RAM +
+ USTORM_QUEUE_STAT_OFFSET(statistics_bin);
+ *p_len = sizeof(struct eth_ustorm_per_queue_stat);
+ } else {
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+ *p_addr = p_resp->pfdev_info.stats_info.ustats.address;
+ *p_len = p_resp->pfdev_info.stats_info.ustats.len;
+ }
}
static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
@@ -1355,31 +1422,31 @@ static void __qed_get_vport_ustats(struct qed_hwfn *p_hwfn,
statistics_bin);
memset(&ustats, 0, sizeof(ustats));
- qed_memcpy_from(p_hwfn, p_ptt, &ustats,
- ustats_addr, ustats_len);
-
- p_stats->rx_ucast_bytes +=
- HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
- p_stats->rx_mcast_bytes +=
- HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
- p_stats->rx_bcast_bytes +=
- HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
- p_stats->rx_ucast_pkts +=
- HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
- p_stats->rx_mcast_pkts +=
- HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
- p_stats->rx_bcast_pkts +=
- HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
+ qed_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
+
+ p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
+ p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
+ p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
+ p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
+ p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
+ p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}
static void __qed_get_vport_mstats_addrlen(struct qed_hwfn *p_hwfn,
u32 *p_addr,
- u32 *p_len,
- u16 statistics_bin)
+ u32 *p_len, u16 statistics_bin)
{
- *p_addr = BAR0_MAP_REG_MSDM_RAM +
- MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
- *p_len = sizeof(struct eth_mstorm_per_queue_stat);
+ if (IS_PF(p_hwfn->cdev)) {
+ *p_addr = BAR0_MAP_REG_MSDM_RAM +
+ MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
+ *p_len = sizeof(struct eth_mstorm_per_queue_stat);
+ } else {
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
+
+ *p_addr = p_resp->pfdev_info.stats_info.mstats.address;
+ *p_len = p_resp->pfdev_info.stats_info.mstats.len;
+ }
}
static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
@@ -1394,21 +1461,17 @@ static void __qed_get_vport_mstats(struct qed_hwfn *p_hwfn,
statistics_bin);
memset(&mstats, 0, sizeof(mstats));
- qed_memcpy_from(p_hwfn, p_ptt, &mstats,
- mstats_addr, mstats_len);
+ qed_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
- p_stats->no_buff_discards +=
- HILO_64_REGPAIR(mstats.no_buff_discard);
+ p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard);
p_stats->packet_too_big_discard +=
HILO_64_REGPAIR(mstats.packet_too_big_discard);
- p_stats->ttl0_discard +=
- HILO_64_REGPAIR(mstats.ttl0_discard);
+ p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
p_stats->tpa_coalesced_pkts +=
HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
p_stats->tpa_coalesced_events +=
HILO_64_REGPAIR(mstats.tpa_coalesced_events);
- p_stats->tpa_aborts_num +=
- HILO_64_REGPAIR(mstats.tpa_aborts_num);
+ p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num);
p_stats->tpa_coalesced_bytes +=
HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
}
@@ -1427,51 +1490,51 @@ static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
offsetof(struct public_port, stats),
sizeof(port_stats));
- p_stats->rx_64_byte_packets += port_stats.pmm.r64;
- p_stats->rx_127_byte_packets += port_stats.pmm.r127;
- p_stats->rx_255_byte_packets += port_stats.pmm.r255;
- p_stats->rx_511_byte_packets += port_stats.pmm.r511;
- p_stats->rx_1023_byte_packets += port_stats.pmm.r1023;
- p_stats->rx_1518_byte_packets += port_stats.pmm.r1518;
- p_stats->rx_1522_byte_packets += port_stats.pmm.r1522;
- p_stats->rx_2047_byte_packets += port_stats.pmm.r2047;
- p_stats->rx_4095_byte_packets += port_stats.pmm.r4095;
- p_stats->rx_9216_byte_packets += port_stats.pmm.r9216;
- p_stats->rx_16383_byte_packets += port_stats.pmm.r16383;
- p_stats->rx_crc_errors += port_stats.pmm.rfcs;
- p_stats->rx_mac_crtl_frames += port_stats.pmm.rxcf;
- p_stats->rx_pause_frames += port_stats.pmm.rxpf;
- p_stats->rx_pfc_frames += port_stats.pmm.rxpp;
- p_stats->rx_align_errors += port_stats.pmm.raln;
- p_stats->rx_carrier_errors += port_stats.pmm.rfcr;
- p_stats->rx_oversize_packets += port_stats.pmm.rovr;
- p_stats->rx_jabbers += port_stats.pmm.rjbr;
- p_stats->rx_undersize_packets += port_stats.pmm.rund;
- p_stats->rx_fragments += port_stats.pmm.rfrg;
- p_stats->tx_64_byte_packets += port_stats.pmm.t64;
- p_stats->tx_65_to_127_byte_packets += port_stats.pmm.t127;
- p_stats->tx_128_to_255_byte_packets += port_stats.pmm.t255;
- p_stats->tx_256_to_511_byte_packets += port_stats.pmm.t511;
- p_stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023;
- p_stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518;
- p_stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047;
- p_stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095;
- p_stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216;
- p_stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383;
- p_stats->tx_pause_frames += port_stats.pmm.txpf;
- p_stats->tx_pfc_frames += port_stats.pmm.txpp;
- p_stats->tx_lpi_entry_count += port_stats.pmm.tlpiec;
- p_stats->tx_total_collisions += port_stats.pmm.tncl;
- p_stats->rx_mac_bytes += port_stats.pmm.rbyte;
- p_stats->rx_mac_uc_packets += port_stats.pmm.rxuca;
- p_stats->rx_mac_mc_packets += port_stats.pmm.rxmca;
- p_stats->rx_mac_bc_packets += port_stats.pmm.rxbca;
- p_stats->rx_mac_frames_ok += port_stats.pmm.rxpok;
- p_stats->tx_mac_bytes += port_stats.pmm.tbyte;
- p_stats->tx_mac_uc_packets += port_stats.pmm.txuca;
- p_stats->tx_mac_mc_packets += port_stats.pmm.txmca;
- p_stats->tx_mac_bc_packets += port_stats.pmm.txbca;
- p_stats->tx_mac_ctrl_frames += port_stats.pmm.txcf;
+ p_stats->rx_64_byte_packets += port_stats.eth.r64;
+ p_stats->rx_65_to_127_byte_packets += port_stats.eth.r127;
+ p_stats->rx_128_to_255_byte_packets += port_stats.eth.r255;
+ p_stats->rx_256_to_511_byte_packets += port_stats.eth.r511;
+ p_stats->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
+ p_stats->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
+ p_stats->rx_1519_to_1522_byte_packets += port_stats.eth.r1522;
+ p_stats->rx_1519_to_2047_byte_packets += port_stats.eth.r2047;
+ p_stats->rx_2048_to_4095_byte_packets += port_stats.eth.r4095;
+ p_stats->rx_4096_to_9216_byte_packets += port_stats.eth.r9216;
+ p_stats->rx_9217_to_16383_byte_packets += port_stats.eth.r16383;
+ p_stats->rx_crc_errors += port_stats.eth.rfcs;
+ p_stats->rx_mac_crtl_frames += port_stats.eth.rxcf;
+ p_stats->rx_pause_frames += port_stats.eth.rxpf;
+ p_stats->rx_pfc_frames += port_stats.eth.rxpp;
+ p_stats->rx_align_errors += port_stats.eth.raln;
+ p_stats->rx_carrier_errors += port_stats.eth.rfcr;
+ p_stats->rx_oversize_packets += port_stats.eth.rovr;
+ p_stats->rx_jabbers += port_stats.eth.rjbr;
+ p_stats->rx_undersize_packets += port_stats.eth.rund;
+ p_stats->rx_fragments += port_stats.eth.rfrg;
+ p_stats->tx_64_byte_packets += port_stats.eth.t64;
+ p_stats->tx_65_to_127_byte_packets += port_stats.eth.t127;
+ p_stats->tx_128_to_255_byte_packets += port_stats.eth.t255;
+ p_stats->tx_256_to_511_byte_packets += port_stats.eth.t511;
+ p_stats->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
+ p_stats->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
+ p_stats->tx_1519_to_2047_byte_packets += port_stats.eth.t2047;
+ p_stats->tx_2048_to_4095_byte_packets += port_stats.eth.t4095;
+ p_stats->tx_4096_to_9216_byte_packets += port_stats.eth.t9216;
+ p_stats->tx_9217_to_16383_byte_packets += port_stats.eth.t16383;
+ p_stats->tx_pause_frames += port_stats.eth.txpf;
+ p_stats->tx_pfc_frames += port_stats.eth.txpp;
+ p_stats->tx_lpi_entry_count += port_stats.eth.tlpiec;
+ p_stats->tx_total_collisions += port_stats.eth.tncl;
+ p_stats->rx_mac_bytes += port_stats.eth.rbyte;
+ p_stats->rx_mac_uc_packets += port_stats.eth.rxuca;
+ p_stats->rx_mac_mc_packets += port_stats.eth.rxmca;
+ p_stats->rx_mac_bc_packets += port_stats.eth.rxbca;
+ p_stats->rx_mac_frames_ok += port_stats.eth.rxpok;
+ p_stats->tx_mac_bytes += port_stats.eth.tbyte;
+ p_stats->tx_mac_uc_packets += port_stats.eth.txuca;
+ p_stats->tx_mac_mc_packets += port_stats.eth.txmca;
+ p_stats->tx_mac_bc_packets += port_stats.eth.txbca;
+ p_stats->tx_mac_ctrl_frames += port_stats.eth.txcf;
for (j = 0; j < 8; j++) {
p_stats->brb_truncates += port_stats.brb.brb_truncate[j];
p_stats->brb_discards += port_stats.brb.brb_discard[j];
@@ -1481,44 +1544,49 @@ static void __qed_get_vport_port_stats(struct qed_hwfn *p_hwfn,
static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_eth_stats *stats,
- u16 statistics_bin)
+ u16 statistics_bin, bool b_get_port_stats)
{
__qed_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
__qed_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
__qed_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
__qed_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
- if (p_hwfn->mcp_info)
+ if (b_get_port_stats && p_hwfn->mcp_info)
__qed_get_vport_port_stats(p_hwfn, p_ptt, stats);
}
static void _qed_get_vport_stats(struct qed_dev *cdev,
struct qed_eth_stats *stats)
{
- u8 fw_vport = 0;
- int i;
+ u8 fw_vport = 0;
+ int i;
memset(stats, 0, sizeof(*stats));
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
- struct qed_ptt *p_ptt;
-
- /* The main vport index is relative first */
- if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
- DP_ERR(p_hwfn, "No vport available!\n");
- continue;
+ struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
+ : NULL;
+
+ if (IS_PF(cdev)) {
+ /* The main vport index is relative first */
+ if (qed_fw_vport(p_hwfn, 0, &fw_vport)) {
+ DP_ERR(p_hwfn, "No vport available!\n");
+ goto out;
+ }
}
- p_ptt = qed_ptt_acquire(p_hwfn);
- if (!p_ptt) {
+ if (IS_PF(cdev) && !p_ptt) {
DP_ERR(p_hwfn, "Failed to acquire ptt\n");
continue;
}
- __qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport);
+ __qed_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
+ IS_PF(cdev) ? true : false);
- qed_ptt_release(p_hwfn, p_ptt);
+out:
+ if (IS_PF(cdev) && p_ptt)
+ qed_ptt_release(p_hwfn, p_ptt);
}
}
@@ -1552,10 +1620,11 @@ void qed_reset_vport_stats(struct qed_dev *cdev)
struct eth_mstorm_per_queue_stat mstats;
struct eth_ustorm_per_queue_stat ustats;
struct eth_pstorm_per_queue_stat pstats;
- struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
+ struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn)
+ : NULL;
u32 addr = 0, len = 0;
- if (!p_ptt) {
+ if (IS_PF(cdev) && !p_ptt) {
DP_ERR(p_hwfn, "Failed to acquire ptt\n");
continue;
}
@@ -1572,7 +1641,8 @@ void qed_reset_vport_stats(struct qed_dev *cdev)
__qed_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
qed_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
- qed_ptt_release(p_hwfn, p_ptt);
+ if (IS_PF(cdev))
+ qed_ptt_release(p_hwfn, p_ptt);
}
/* PORT statistics are not necessarily reset, so we need to
@@ -1593,32 +1663,68 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
info->num_tc = 1;
- if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
- for_each_hwfn(cdev, i)
- info->num_queues += FEAT_NUM(&cdev->hwfns[i],
- QED_PF_L2_QUE);
- if (cdev->int_params.fp_msix_cnt)
- info->num_queues = min_t(u8, info->num_queues,
- cdev->int_params.fp_msix_cnt);
+ if (IS_PF(cdev)) {
+ int max_vf_vlan_filters = 0;
+
+ if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
+ for_each_hwfn(cdev, i)
+ info->num_queues +=
+ FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);
+ if (cdev->int_params.fp_msix_cnt)
+ info->num_queues =
+ min_t(u8, info->num_queues,
+ cdev->int_params.fp_msix_cnt);
+ } else {
+ info->num_queues = cdev->num_hwfns;
+ }
+
+ if (IS_QED_SRIOV(cdev))
+ max_vf_vlan_filters = cdev->p_iov_info->total_vfs *
+ QED_ETH_VF_NUM_VLAN_FILTERS;
+ info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN) -
+ max_vf_vlan_filters;
+
+ ether_addr_copy(info->port_mac,
+ cdev->hwfns[0].hw_info.hw_mac_addr);
} else {
- info->num_queues = cdev->num_hwfns;
- }
+ qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev), &info->num_queues);
+ if (cdev->num_hwfns > 1) {
+ u8 queues = 0;
+
+ qed_vf_get_num_rxqs(&cdev->hwfns[1], &queues);
+ info->num_queues += queues;
+ }
- info->num_vlan_filters = RESC_NUM(&cdev->hwfns[0], QED_VLAN);
- ether_addr_copy(info->port_mac,
- cdev->hwfns[0].hw_info.hw_mac_addr);
+ qed_vf_get_num_vlan_filters(&cdev->hwfns[0],
+ &info->num_vlan_filters);
+ qed_vf_get_port_mac(&cdev->hwfns[0], info->port_mac);
+ }
qed_fill_dev_info(cdev, &info->common);
+ if (IS_VF(cdev))
+ memset(info->common.hw_mac, 0, ETH_ALEN);
+
return 0;
}
static void qed_register_eth_ops(struct qed_dev *cdev,
- struct qed_eth_cb_ops *ops,
- void *cookie)
+ struct qed_eth_cb_ops *ops, void *cookie)
{
- cdev->protocol_ops.eth = ops;
- cdev->ops_cookie = cookie;
+ cdev->protocol_ops.eth = ops;
+ cdev->ops_cookie = cookie;
+
+	/* For VFs, start reading the PF's bulletin board */
+ if (IS_VF(cdev))
+ qed_vf_start_iov_wq(cdev);
+}
+
+static bool qed_check_mac(struct qed_dev *cdev, u8 *mac)
+{
+ if (IS_PF(cdev))
+ return true;
+
+ return qed_vf_check_mac(&cdev->hwfns[0], mac);
}
static int qed_start_vport(struct qed_dev *cdev,
@@ -1633,6 +1739,7 @@ static int qed_start_vport(struct qed_dev *cdev,
start.tpa_mode = params->gro_enable ? QED_TPA_MODE_GRO :
QED_TPA_MODE_NONE;
start.remove_inner_vlan = params->remove_inner_vlan;
+	start.only_untagged = true;
start.drop_ttl0 = params->drop_ttl0;
start.opaque_fid = p_hwfn->hw_info.opaque_fid;
start.concrete_fid = p_hwfn->hw_info.concrete_fid;
@@ -1653,7 +1760,8 @@ static int qed_start_vport(struct qed_dev *cdev,
start.vport_id, start.mtu);
}
- qed_reset_vport_stats(cdev);
+ if (params->clear_stats)
+ qed_reset_vport_stats(cdev);
return 0;
}
@@ -1699,6 +1807,8 @@ static int qed_update_vport(struct qed_dev *cdev,
params->update_vport_active_flg;
sp_params.vport_active_rx_flg = params->vport_active_flg;
sp_params.vport_active_tx_flg = params->vport_active_flg;
+ sp_params.update_tx_switching_flg = params->update_tx_switching_flg;
+ sp_params.tx_switching_flg = params->tx_switching_flg;
sp_params.accept_any_vlan = params->accept_any_vlan;
sp_params.update_accept_any_vlan_flg =
params->update_accept_any_vlan_flg;
@@ -1744,9 +1854,7 @@ static int qed_update_vport(struct qed_dev *cdev,
sp_rss_params.update_rss_capabilities = 1;
sp_rss_params.update_rss_ind_table = 1;
sp_rss_params.update_rss_key = 1;
- sp_rss_params.rss_caps = QED_RSS_IPV4 |
- QED_RSS_IPV6 |
- QED_RSS_IPV4_TCP | QED_RSS_IPV6_TCP;
+ sp_rss_params.rss_caps = params->rss_params.rss_caps;
sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */
memcpy(sp_rss_params.rss_ind_table,
params->rss_params.rss_ind_table,
@@ -1899,6 +2007,39 @@ static int qed_stop_txq(struct qed_dev *cdev,
return 0;
}
+static int qed_tunn_configure(struct qed_dev *cdev,
+ struct qed_tunn_params *tunn_params)
+{
+ struct qed_tunn_update_params tunn_info;
+ int i, rc;
+
+ if (IS_VF(cdev))
+ return 0;
+
+ memset(&tunn_info, 0, sizeof(tunn_info));
+ if (tunn_params->update_vxlan_port == 1) {
+ tunn_info.update_vxlan_udp_port = 1;
+ tunn_info.vxlan_udp_port = tunn_params->vxlan_port;
+ }
+
+ if (tunn_params->update_geneve_port == 1) {
+ tunn_info.update_geneve_udp_port = 1;
+ tunn_info.geneve_udp_port = tunn_params->geneve_port;
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *hwfn = &cdev->hwfns[i];
+
+ rc = qed_sp_pf_update_tunn_cfg(hwfn, &tunn_info,
+ QED_SPQ_MODE_EBLOCK, NULL);
+
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
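
A consumer such as the ethernet driver reaches this path through the
tunn_config op registered further down; a hedged usage sketch, with
4789 (the IANA VXLAN default) as a purely illustrative port:

static int push_vxlan_port(const struct qed_eth_ops *ops,
			   struct qed_dev *cdev)
{
	struct qed_tunn_params tunn = {
		.update_vxlan_port = 1,
		.vxlan_port = 4789,	/* example value */
	};

	return ops->tunn_config(cdev, &tunn);
}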
+
static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
enum qed_filter_rx_mode_type type)
{
@@ -2026,10 +2167,25 @@ static int qed_fp_cqe_completion(struct qed_dev *dev,
cqe);
}
+#ifdef CONFIG_QED_SRIOV
+extern const struct qed_iov_hv_ops qed_iov_ops_pass;
+#endif
+
+#ifdef CONFIG_DCB
+extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass;
+#endif
+
static const struct qed_eth_ops qed_eth_ops_pass = {
.common = &qed_common_ops_pass,
+#ifdef CONFIG_QED_SRIOV
+ .iov = &qed_iov_ops_pass,
+#endif
+#ifdef CONFIG_DCB
+ .dcb = &qed_dcbnl_ops_pass,
+#endif
.fill_dev_info = &qed_fill_eth_dev_info,
.register_ops = &qed_register_eth_ops,
+ .check_mac = &qed_check_mac,
.vport_start = &qed_start_vport,
.vport_stop = &qed_stop_vport,
.vport_update = &qed_update_vport,
@@ -2041,16 +2197,11 @@ static const struct qed_eth_ops qed_eth_ops_pass = {
.fastpath_stop = &qed_fastpath_stop,
.eth_cqe_completion = &qed_fp_cqe_completion,
.get_vport_stats = &qed_get_vport_stats,
+ .tunn_config = &qed_tunn_configure,
};
-const struct qed_eth_ops *qed_get_eth_ops(u32 version)
+const struct qed_eth_ops *qed_get_eth_ops(void)
{
- if (version != QED_ETH_INTERFACE_VERSION) {
- pr_notice("Cannot supply ethtool operations [%08x != %08x]\n",
- version, QED_ETH_INTERFACE_VERSION);
- return NULL;
- }
-
return &qed_eth_ops_pass;
}
EXPORT_SYMBOL(qed_get_eth_ops);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
new file mode 100644
index 000000000000..002114543451
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -0,0 +1,239 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+#ifndef _QED_L2_H
+#define _QED_L2_H
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/qed/qed_eth_if.h>
+#include "qed.h"
+#include "qed_hw.h"
+#include "qed_sp.h"
+
+struct qed_sge_tpa_params {
+ u8 max_buffers_per_cqe;
+
+ u8 update_tpa_en_flg;
+ u8 tpa_ipv4_en_flg;
+ u8 tpa_ipv6_en_flg;
+ u8 tpa_ipv4_tunn_en_flg;
+ u8 tpa_ipv6_tunn_en_flg;
+
+ u8 update_tpa_param_flg;
+ u8 tpa_pkt_split_flg;
+ u8 tpa_hdr_data_split_flg;
+ u8 tpa_gro_consistent_flg;
+ u8 tpa_max_aggs_num;
+ u16 tpa_max_size;
+ u16 tpa_min_size_to_start;
+ u16 tpa_min_size_to_cont;
+};
+
+enum qed_filter_opcode {
+ QED_FILTER_ADD,
+ QED_FILTER_REMOVE,
+ QED_FILTER_MOVE,
+	QED_FILTER_REPLACE,	/* Delete all MACs and add a new one instead */
+ QED_FILTER_FLUSH, /* Removes all filters */
+};
+
+enum qed_filter_ucast_type {
+ QED_FILTER_MAC,
+ QED_FILTER_VLAN,
+ QED_FILTER_MAC_VLAN,
+ QED_FILTER_INNER_MAC,
+ QED_FILTER_INNER_VLAN,
+ QED_FILTER_INNER_PAIR,
+ QED_FILTER_INNER_MAC_VNI_PAIR,
+ QED_FILTER_MAC_VNI_PAIR,
+ QED_FILTER_VNI,
+};
+
+struct qed_filter_ucast {
+ enum qed_filter_opcode opcode;
+ enum qed_filter_ucast_type type;
+ u8 is_rx_filter;
+ u8 is_tx_filter;
+ u8 vport_to_add_to;
+ u8 vport_to_remove_from;
+ unsigned char mac[ETH_ALEN];
+ u8 assert_on_error;
+ u16 vlan;
+ u32 vni;
+};
+
+struct qed_filter_mcast {
+ /* MOVE is not supported for multicast */
+ enum qed_filter_opcode opcode;
+ u8 vport_to_add_to;
+ u8 vport_to_remove_from;
+ u8 num_mc_addrs;
+#define QED_MAX_MC_ADDRS 64
+ unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN];
+};
+
+int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
+ u16 rx_queue_id,
+ bool eq_completion_only, bool cqe_completion);
+
+int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id);
+
+enum qed_tpa_mode {
+ QED_TPA_MODE_NONE,
+ QED_TPA_MODE_UNUSED,
+ QED_TPA_MODE_GRO,
+ QED_TPA_MODE_MAX
+};
+
+struct qed_sp_vport_start_params {
+ enum qed_tpa_mode tpa_mode;
+ bool remove_inner_vlan;
+ bool tx_switching;
+ bool only_untagged;
+ bool drop_ttl0;
+ u8 max_buffers_per_cqe;
+ u32 concrete_fid;
+ u16 opaque_fid;
+ u8 vport_id;
+ u16 mtu;
+};
+
+int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_start_params *p_params);
+
+struct qed_rss_params {
+ u8 update_rss_config;
+ u8 rss_enable;
+ u8 rss_eng_id;
+ u8 update_rss_capabilities;
+ u8 update_rss_ind_table;
+ u8 update_rss_key;
+ u8 rss_caps;
+ u8 rss_table_size_log;
+ u16 rss_ind_table[QED_RSS_IND_TABLE_SIZE];
+ u32 rss_key[QED_RSS_KEY_SIZE];
+};
+
+struct qed_filter_accept_flags {
+ u8 update_rx_mode_config;
+ u8 update_tx_mode_config;
+ u8 rx_accept_filter;
+ u8 tx_accept_filter;
+#define QED_ACCEPT_NONE 0x01
+#define QED_ACCEPT_UCAST_MATCHED 0x02
+#define QED_ACCEPT_UCAST_UNMATCHED 0x04
+#define QED_ACCEPT_MCAST_MATCHED 0x08
+#define QED_ACCEPT_MCAST_UNMATCHED 0x10
+#define QED_ACCEPT_BCAST 0x20
+};
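
As one example of composing these bits, a promiscuous Rx configuration
might look as follows; this is a sketch, the driver builds such masks
in qed_configure_filter_rx_mode():

	struct qed_filter_accept_flags accept = {
		.update_rx_mode_config = 1,
		.rx_accept_filter = QED_ACCEPT_UCAST_MATCHED |
				    QED_ACCEPT_UCAST_UNMATCHED |
				    QED_ACCEPT_MCAST_MATCHED |
				    QED_ACCEPT_MCAST_UNMATCHED |
				    QED_ACCEPT_BCAST,
	};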
+
+struct qed_sp_vport_update_params {
+ u16 opaque_fid;
+ u8 vport_id;
+ u8 update_vport_active_rx_flg;
+ u8 vport_active_rx_flg;
+ u8 update_vport_active_tx_flg;
+ u8 vport_active_tx_flg;
+ u8 update_inner_vlan_removal_flg;
+ u8 inner_vlan_removal_flg;
+ u8 silent_vlan_removal_flg;
+ u8 update_default_vlan_enable_flg;
+ u8 default_vlan_enable_flg;
+ u8 update_default_vlan_flg;
+ u16 default_vlan;
+ u8 update_tx_switching_flg;
+ u8 tx_switching_flg;
+ u8 update_approx_mcast_flg;
+ u8 update_anti_spoofing_en_flg;
+ u8 anti_spoofing_en;
+ u8 update_accept_any_vlan_flg;
+ u8 accept_any_vlan;
+ unsigned long bins[8];
+ struct qed_rss_params *rss_params;
+ struct qed_filter_accept_flags accept_flags;
+ struct qed_sge_tpa_params *sge_tpa_params;
+};
+
+int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_params,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data);
+
+/**
+ * @brief qed_sp_vport_stop -
+ *
+ * This ramrod closes a VPort after all its RX and TX queues are terminated.
+ * An Assert is generated if any queues are left open.
+ *
+ * @param p_hwfn
+ * @param opaque_fid
+ * @param vport_id VPort ID
+ *
+ * @return int
+ */
+int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id);
+
+int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ struct qed_filter_ucast *p_filter_cmd,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data);
+
+/**
+ * @brief qed_sp_eth_rx_queues_update -
+ *
+ * This ramrod updates an RX queue. It is used for setting the active state
+ * of the queue and updating the TPA and SGE parameters.
+ *
+ * @note At the moment - only used by non-Linux VFs.
+ *
+ * @param p_hwfn
+ * @param rx_queue_id RX Queue ID
+ * @param num_rxqs Allows updating multiple Rx
+ * queues, from rx_queue_id up to
+ * (rx_queue_id + num_rxqs - 1)
+ * @param complete_cqe_flg Post completion to the CQE Ring if set
+ * @param complete_event_flg Post completion to the Event Ring if set
+ *
+ * @return int
+ */
+
+int
+qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
+ u16 rx_queue_id,
+ u8 num_rxqs,
+ u8 complete_cqe_flg,
+ u8 complete_event_flg,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data);
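
A hypothetical call updating four Rx queues starting at queue 0 and
requesting only an event-ring completion (all values illustrative):

	rc = qed_sp_eth_rx_queues_update(p_hwfn, 0 /* rx_queue_id */,
					 4 /* num_rxqs */,
					 0 /* complete_cqe_flg */,
					 1 /* complete_event_flg */,
					 QED_SPQ_MODE_EBLOCK, NULL);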
+
+int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_start_params *p_params);
+
+int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u32 cid,
+ struct qed_queue_start_common_params *params,
+ u8 stats_id,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size);
+
+int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
+ u16 opaque_fid,
+ u32 cid,
+ struct qed_queue_start_common_params *p_params,
+ u8 stats_id,
+ dma_addr_t pbl_addr,
+ u16 pbl_size,
+ union qed_qm_pq_params *p_pq_params);
+
+u8 qed_mcast_bin_from_mac(u8 *mac);
+
+#endif /* _QED_L2_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 26d40db07ddd..c7dc34bfdd0a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -24,10 +24,12 @@
#include <linux/qed/qed_if.h>
#include "qed.h"
+#include "qed_sriov.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
#include "qed_mcp.h"
#include "qed_hw.h"
+#include "qed_selftest.h"
static char version[] =
"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";
@@ -124,7 +126,7 @@ static int qed_init_pci(struct qed_dev *cdev,
goto err1;
}
- if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
+ if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
DP_NOTICE(cdev, "No memory region found in bar #2\n");
rc = -EIO;
goto err1;
@@ -156,7 +158,7 @@ static int qed_init_pci(struct qed_dev *cdev,
}
cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
- if (cdev->pci_params.pm_cap == 0)
+ if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
DP_NOTICE(cdev, "Cannot find power management capability\n");
rc = qed_set_coherency_mask(cdev);
@@ -174,12 +176,14 @@ static int qed_init_pci(struct qed_dev *cdev,
goto err2;
}
- cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
- cdev->db_size = pci_resource_len(cdev->pdev, 2);
- cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
- if (!cdev->doorbells) {
- DP_NOTICE(cdev, "Cannot map doorbell space\n");
- return -ENOMEM;
+ if (IS_PF(cdev)) {
+ cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
+ cdev->db_size = pci_resource_len(cdev->pdev, 2);
+ cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
+ if (!cdev->doorbells) {
+ DP_NOTICE(cdev, "Cannot map doorbell space\n");
+ return -ENOMEM;
+ }
}
return 0;
@@ -203,23 +207,38 @@ int qed_fill_dev_info(struct qed_dev *cdev,
dev_info->pci_mem_start = cdev->pci_params.mem_start;
dev_info->pci_mem_end = cdev->pci_params.mem_end;
dev_info->pci_irq = cdev->pci_params.irq;
+ dev_info->rdma_supported =
+ (cdev->hwfns[0].hw_info.personality == QED_PCI_ETH_ROCE);
dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);
- dev_info->fw_major = FW_MAJOR_VERSION;
- dev_info->fw_minor = FW_MINOR_VERSION;
- dev_info->fw_rev = FW_REVISION_VERSION;
- dev_info->fw_eng = FW_ENGINEERING_VERSION;
- dev_info->mf_mode = cdev->mf_mode;
+ if (IS_PF(cdev)) {
+ dev_info->fw_major = FW_MAJOR_VERSION;
+ dev_info->fw_minor = FW_MINOR_VERSION;
+ dev_info->fw_rev = FW_REVISION_VERSION;
+ dev_info->fw_eng = FW_ENGINEERING_VERSION;
+ dev_info->mf_mode = cdev->mf_mode;
+ dev_info->tx_switching = true;
+ } else {
+ qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
+ &dev_info->fw_minor, &dev_info->fw_rev,
+ &dev_info->fw_eng);
+ }
- qed_mcp_get_mfw_ver(cdev, &dev_info->mfw_rev);
+ if (IS_PF(cdev)) {
+ ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
+ if (ptt) {
+ qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
+ &dev_info->mfw_rev, NULL);
- ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
- if (ptt) {
- qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
- &dev_info->flash_size);
+ qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
+ &dev_info->flash_size);
- qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
+ qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
+ }
+ } else {
+ qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
+ &dev_info->mfw_rev, NULL);
}
return 0;
@@ -256,9 +275,7 @@ static int qed_set_power_state(struct qed_dev *cdev,
/* probing */
static struct qed_dev *qed_probe(struct pci_dev *pdev,
- enum qed_protocol protocol,
- u32 dp_module,
- u8 dp_level)
+ struct qed_probe_params *params)
{
struct qed_dev *cdev;
int rc;
@@ -267,9 +284,12 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev,
if (!cdev)
goto err0;
- cdev->protocol = protocol;
+ cdev->protocol = params->protocol;
+
+ if (params->is_vf)
+ cdev->b_is_vf = true;
- qed_init_dp(cdev, dp_module, dp_level);
+ qed_init_dp(cdev, params->dp_module, params->dp_level);
rc = qed_init_pci(cdev, pdev);
if (rc) {
@@ -395,15 +415,17 @@ static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
/* Fallthrough */
case QED_INT_MODE_MSI:
- rc = pci_enable_msi(cdev->pdev);
- if (!rc) {
- int_params->out.int_mode = QED_INT_MODE_MSI;
- goto out;
- }
+ if (cdev->num_hwfns == 1) {
+ rc = pci_enable_msi(cdev->pdev);
+ if (!rc) {
+ int_params->out.int_mode = QED_INT_MODE_MSI;
+ goto out;
+ }
- DP_NOTICE(cdev, "Failed to enable MSI\n");
- if (force_mode)
- goto out;
+ DP_NOTICE(cdev, "Failed to enable MSI\n");
+ if (force_mode)
+ goto out;
+ }
/* Fallthrough */
case QED_INT_MODE_INTA:
@@ -637,8 +659,13 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
struct qed_sb_cnt_info sb_cnt_info;
int rc;
int i;
- memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
+ if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
+ DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
+ return -EINVAL;
+ }
+
+ memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
cdev->int_params.in.int_mode = int_mode;
for_each_hwfn(cdev, i) {
memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
@@ -663,6 +690,35 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
return 0;
}
+static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
+{
+ int rc;
+
+ memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
+ cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;
+
+ qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
+ &cdev->int_params.in.num_vectors);
+ if (cdev->num_hwfns > 1) {
+ u8 vectors = 0;
+
+ qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
+ cdev->int_params.in.num_vectors += vectors;
+ }
+
+	/* We want a minimum of one fastpath vector per VF hwfn */
+ cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;
+
+ rc = qed_set_int_mode(cdev, true);
+ if (rc)
+ return rc;
+
+ cdev->int_params.fp_msix_base = 0;
+ cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;
+
+ return 0;
+}
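
In isolation, the vector accounting above: one MSI-X vector is
requested per VF Rx queue, summed over both engines on a CMT device,
with a floor of one fastpath vector per hwfn. A toy model:

static int vf_msix_plan(int rxqs_hwfn0, int rxqs_hwfn1, int num_hwfns,
			int *min_vectors)
{
	*min_vectors = num_hwfns;	/* floor: one per engine */
	return rxqs_hwfn0 + rxqs_hwfn1;	/* requested vector count */
}

/* e.g. 4 + 4 queues on a two-hwfn device -> request 8, accept >= 2 */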
+
u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
@@ -744,39 +800,63 @@ static void qed_update_pf_params(struct qed_dev *cdev,
static int qed_slowpath_start(struct qed_dev *cdev,
struct qed_slowpath_params *params)
{
+ struct qed_tunn_start_params tunn_info;
struct qed_mcp_drv_version drv_version;
const u8 *data = NULL;
struct qed_hwfn *hwfn;
- int rc;
+ int rc = -EINVAL;
- rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
- &cdev->pdev->dev);
- if (rc) {
- DP_NOTICE(cdev,
- "Failed to find fw file - /lib/firmware/%s\n",
- QED_FW_FILE_NAME);
+ if (qed_iov_wq_start(cdev))
goto err;
+
+ if (IS_PF(cdev)) {
+ rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
+ &cdev->pdev->dev);
+ if (rc) {
+ DP_NOTICE(cdev,
+ "Failed to find fw file - /lib/firmware/%s\n",
+ QED_FW_FILE_NAME);
+ goto err;
+ }
}
rc = qed_nic_setup(cdev);
if (rc)
goto err;
- rc = qed_slowpath_setup_int(cdev, params->int_mode);
+ if (IS_PF(cdev))
+ rc = qed_slowpath_setup_int(cdev, params->int_mode);
+ else
+ rc = qed_slowpath_vf_setup_int(cdev);
if (rc)
goto err1;
- /* Allocate stream for unzipping */
- rc = qed_alloc_stream_mem(cdev);
- if (rc) {
- DP_NOTICE(cdev, "Failed to allocate stream memory\n");
- goto err2;
+ if (IS_PF(cdev)) {
+ /* Allocate stream for unzipping */
+ rc = qed_alloc_stream_mem(cdev);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed to allocate stream memory\n");
+ goto err2;
+ }
+
+		/* First Dword used to differentiate between various sources */
+ data = cdev->firmware->data + sizeof(u32);
}
- /* Start the slowpath */
- data = cdev->firmware->data;
+ memset(&tunn_info, 0, sizeof(tunn_info));
+ tunn_info.tunn_mode |= 1 << QED_MODE_VXLAN_TUNN |
+ 1 << QED_MODE_L2GRE_TUNN |
+ 1 << QED_MODE_IPGRE_TUNN |
+ 1 << QED_MODE_L2GENEVE_TUNN |
+ 1 << QED_MODE_IPGENEVE_TUNN;
+
+ tunn_info.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN;
+ tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN;
+ tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN;
- rc = qed_hw_init(cdev, true, cdev->int_params.out.int_mode,
+ /* Start the slowpath */
+ rc = qed_hw_init(cdev, &tunn_info, true,
+ cdev->int_params.out.int_mode,
true, data);
if (rc)
goto err2;
@@ -784,18 +864,20 @@ static int qed_slowpath_start(struct qed_dev *cdev,
DP_INFO(cdev,
"HW initialization and function start completed successfully\n");
- hwfn = QED_LEADING_HWFN(cdev);
- drv_version.version = (params->drv_major << 24) |
- (params->drv_minor << 16) |
- (params->drv_rev << 8) |
- (params->drv_eng);
- strlcpy(drv_version.name, params->name,
- MCP_DRV_VER_STR_SIZE - 4);
- rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
- &drv_version);
- if (rc) {
- DP_NOTICE(cdev, "Failed sending drv version command\n");
- return rc;
+ if (IS_PF(cdev)) {
+ hwfn = QED_LEADING_HWFN(cdev);
+ drv_version.version = (params->drv_major << 24) |
+ (params->drv_minor << 16) |
+ (params->drv_rev << 8) |
+ (params->drv_eng);
+ strlcpy(drv_version.name, params->name,
+ MCP_DRV_VER_STR_SIZE - 4);
+ rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
+ &drv_version);
+ if (rc) {
+ DP_NOTICE(cdev, "Failed sending drv version command\n");
+ return rc;
+ }
}
qed_reset_vport_stats(cdev);
@@ -804,13 +886,17 @@ static int qed_slowpath_start(struct qed_dev *cdev,
err2:
qed_hw_timers_stop_all(cdev);
- qed_slowpath_irq_free(cdev);
+ if (IS_PF(cdev))
+ qed_slowpath_irq_free(cdev);
qed_free_stream_mem(cdev);
qed_disable_msix(cdev);
err1:
qed_resc_free(cdev);
err:
- release_firmware(cdev->firmware);
+ if (IS_PF(cdev))
+ release_firmware(cdev->firmware);
+
+ qed_iov_wq_stop(cdev, false);
return rc;
}
@@ -820,15 +906,22 @@ static int qed_slowpath_stop(struct qed_dev *cdev)
if (!cdev)
return -ENODEV;
- qed_free_stream_mem(cdev);
+ if (IS_PF(cdev)) {
+ qed_free_stream_mem(cdev);
+ if (IS_QED_ETH_IF(cdev))
+ qed_sriov_disable(cdev, true);
- qed_nic_stop(cdev);
- qed_slowpath_irq_free(cdev);
+ qed_nic_stop(cdev);
+ qed_slowpath_irq_free(cdev);
+ }
qed_disable_msix(cdev);
qed_nic_reset(cdev);
- release_firmware(cdev->firmware);
+ qed_iov_wq_stop(cdev, true);
+
+ if (IS_PF(cdev))
+ release_firmware(cdev->firmware);
return 0;
}
@@ -902,8 +995,12 @@ static u32 qed_sb_release(struct qed_dev *cdev,
return rc;
}
-static int qed_set_link(struct qed_dev *cdev,
- struct qed_link_params *params)
+static bool qed_can_link_change(struct qed_dev *cdev)
+{
+ return true;
+}
+
+static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
{
struct qed_hwfn *hwfn;
struct qed_mcp_link_params *link_params;
@@ -913,6 +1010,9 @@ static int qed_set_link(struct qed_dev *cdev,
if (!cdev)
return -ENODEV;
+ if (IS_VF(cdev))
+ return 0;
+
/* The link should be set only once per PF */
hwfn = &cdev->hwfns[0];
@@ -940,10 +1040,43 @@ static int qed_set_link(struct qed_dev *cdev,
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
if (params->adv_speeds & 0)
link_params->speed.advertised_speeds |=
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G;
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
}
if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
link_params->speed.forced_speed = params->forced_speed;
+ if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
+ if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
+ link_params->pause.autoneg = true;
+ else
+ link_params->pause.autoneg = false;
+ if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
+ link_params->pause.forced_rx = true;
+ else
+ link_params->pause.forced_rx = false;
+ if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
+ link_params->pause.forced_tx = true;
+ else
+ link_params->pause.forced_tx = false;
+ }
+ if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
+ switch (params->loopback_mode) {
+ case QED_LINK_LOOPBACK_INT_PHY:
+ link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
+ break;
+ case QED_LINK_LOOPBACK_EXT_PHY:
+ link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
+ break;
+ case QED_LINK_LOOPBACK_EXT:
+ link_params->loopback_mode = ETH_LOOPBACK_EXT;
+ break;
+ case QED_LINK_LOOPBACK_MAC:
+ link_params->loopback_mode = ETH_LOOPBACK_MAC;
+ break;
+ default:
+ link_params->loopback_mode = ETH_LOOPBACK_NONE;
+ break;
+ }
+ }
rc = qed_mcp_set_link(hwfn, ptt, params->link_up);
@@ -960,6 +1093,7 @@ static int qed_get_port_type(u32 media_type)
case MEDIA_SFPP_10G_FIBER:
case MEDIA_SFP_1G_FIBER:
case MEDIA_XFP_FIBER:
+ case MEDIA_MODULE_FIBER:
case MEDIA_KR:
port_type = PORT_FIBRE;
break;
@@ -980,6 +1114,39 @@ static int qed_get_port_type(u32 media_type)
return port_type;
}
+static int qed_get_link_data(struct qed_hwfn *hwfn,
+ struct qed_mcp_link_params *params,
+ struct qed_mcp_link_state *link,
+ struct qed_mcp_link_capabilities *link_caps)
+{
+ void *p;
+
+ if (!IS_PF(hwfn->cdev)) {
+ qed_vf_get_link_params(hwfn, params);
+ qed_vf_get_link_state(hwfn, link);
+ qed_vf_get_link_caps(hwfn, link_caps);
+
+ return 0;
+ }
+
+ p = qed_mcp_get_link_params(hwfn);
+ if (!p)
+ return -ENXIO;
+ memcpy(params, p, sizeof(*params));
+
+ p = qed_mcp_get_link_state(hwfn);
+ if (!p)
+ return -ENXIO;
+ memcpy(link, p, sizeof(*link));
+
+ p = qed_mcp_get_link_capabilities(hwfn);
+ if (!p)
+ return -ENXIO;
+ memcpy(link_caps, p, sizeof(*link_caps));
+
+ return 0;
+}
+
static void qed_fill_link(struct qed_hwfn *hwfn,
struct qed_link_output *if_link)
{
@@ -991,10 +1158,10 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
memset(if_link, 0, sizeof(*if_link));
/* Prepare source inputs */
- memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
- memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
- memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn),
- sizeof(link_caps));
+ if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
+ dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
+ return;
+ }
/* Set the link parameters to pass to protocol driver */
if (link.link_up)
@@ -1026,7 +1193,7 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
if_link->advertised_caps |= 0;
if (params.speed.advertised_speeds &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G)
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
if_link->advertised_caps |= 0;
if (link_caps.speed_capabilities &
@@ -1043,7 +1210,7 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
if_link->supported_caps |= 0;
if (link_caps.speed_capabilities &
- NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G)
+ NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
if_link->supported_caps |= 0;
if (link.link_up)
@@ -1096,7 +1263,12 @@ static void qed_fill_link(struct qed_hwfn *hwfn,
static void qed_get_current_link(struct qed_dev *cdev,
struct qed_link_output *if_link)
{
+ int i;
+
qed_fill_link(&cdev->hwfns[0], if_link);
+
+ for_each_hwfn(cdev, i)
+ qed_inform_vf_link_state(&cdev->hwfns[i]);
}
void qed_link_update(struct qed_hwfn *hwfn)
@@ -1106,6 +1278,7 @@ void qed_link_update(struct qed_hwfn *hwfn)
struct qed_link_output if_link;
qed_fill_link(hwfn, &if_link);
+ qed_inform_vf_link_state(hwfn);
if (IS_LEAD_HWFN(hwfn) && cookie)
op->link_update(cookie, &if_link);
@@ -1117,6 +1290,9 @@ static int qed_drain(struct qed_dev *cdev)
struct qed_ptt *ptt;
int i, rc;
+ if (IS_VF(cdev))
+ return 0;
+
for_each_hwfn(cdev, i) {
hwfn = &cdev->hwfns[i];
ptt = qed_ptt_acquire(hwfn);
@@ -1133,6 +1309,38 @@ static int qed_drain(struct qed_dev *cdev)
return 0;
}
+static void qed_get_coalesce(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal)
+{
+ *rx_coal = cdev->rx_coalesce_usecs;
+ *tx_coal = cdev->tx_coalesce_usecs;
+}
+
+static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
+ u8 qid, u16 sb_id)
+{
+ struct qed_hwfn *hwfn;
+ struct qed_ptt *ptt;
+ int hwfn_index;
+ int status = 0;
+
+ hwfn_index = qid % cdev->num_hwfns;
+ hwfn = &cdev->hwfns[hwfn_index];
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt)
+ return -EAGAIN;
+
+ status = qed_set_rxq_coalesce(hwfn, ptt, rx_coal,
+ qid / cdev->num_hwfns, sb_id);
+ if (status)
+ goto out;
+ status = qed_set_txq_coalesce(hwfn, ptt, tx_coal,
+ qid / cdev->num_hwfns, sb_id);
+out:
+ qed_ptt_release(hwfn, ptt);
+
+ return status;
+}
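
The index math is the usual CMT striping: global queue IDs alternate
between engines, so a qid decomposes into an owning hwfn and an
engine-relative queue:

static void map_global_qid(int qid, int num_hwfns,
			   int *hwfn_index, int *rel_qid)
{
	*hwfn_index = qid % num_hwfns;	/* owning engine */
	*rel_qid = qid / num_hwfns;	/* queue within that engine */
}

/* e.g. qid 5 with num_hwfns == 2 -> hwfn 1, relative queue 2 */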
+
static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
@@ -1150,7 +1358,15 @@ static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
return status;
}
+struct qed_selftest_ops qed_selftest_ops_pass = {
+ .selftest_memory = &qed_selftest_memory,
+ .selftest_interrupt = &qed_selftest_interrupt,
+ .selftest_register = &qed_selftest_register,
+ .selftest_clock = &qed_selftest_clock,
+};
+
const struct qed_common_ops qed_common_ops_pass = {
+ .selftest = &qed_selftest_ops_pass,
.probe = &qed_probe,
.remove = &qed_remove,
.set_power_state = &qed_set_power_state,
@@ -1164,22 +1380,14 @@ const struct qed_common_ops qed_common_ops_pass = {
.sb_release = &qed_sb_release,
.simd_handler_config = &qed_simd_handler_config,
.simd_handler_clean = &qed_simd_handler_clean,
+ .can_link_change = &qed_can_link_change,
.set_link = &qed_set_link,
.get_link = &qed_get_current_link,
.drain = &qed_drain,
.update_msglvl = &qed_init_dp,
.chain_alloc = &qed_chain_alloc,
.chain_free = &qed_chain_free,
+ .get_coalesce = &qed_get_coalesce,
+ .set_coalesce = &qed_set_coalesce,
.set_led = &qed_set_led,
};
-
-u32 qed_get_protocol_version(enum qed_protocol protocol)
-{
- switch (protocol) {
- case QED_PROTOCOL_ETH:
- return QED_ETH_INTERFACE_VERSION;
- default:
- return 0;
- }
-}
-EXPORT_SYMBOL(qed_get_protocol_version);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index b89c9a8e1655..f776a77794c5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -15,10 +15,13 @@
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
+#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
+#include "qed_sriov.h"
+
#define CHIP_MCP_RESP_ITER_US 10
#define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
@@ -440,6 +443,75 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
return 0;
}
+static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_PATH);
+ u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
+ u32 path_addr = SECTION_ADDR(mfw_path_offsize,
+ QED_PATH_ID(p_hwfn));
+ u32 disabled_vfs[VF_MAX_STATIC / 32];
+ int i;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_SP,
+ "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
+ mfw_path_offsize, path_addr);
+
+ for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
+ disabled_vfs[i] = qed_rd(p_hwfn, p_ptt,
+ path_addr +
+ offsetof(struct public_path,
+ mcp_vf_disabled) +
+ sizeof(u32) * i);
+ DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
+ "FLR-ed VFs [%08x,...,%08x] - %08x\n",
+ i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
+ }
+
+ if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
+ qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
+}
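
The mcp_vf_disabled words read above form a bitmap with 32 VFs per
u32. A helper of the kind qed_iov_mark_vf_flr() would need to test one
VF (illustrative, not the driver's code):

#include <stdint.h>

static int vf_flr_pending(const uint32_t *disabled_vfs, int rel_vf_id)
{
	return !!(disabled_vfs[rel_vf_id / 32] &
		  (1u << (rel_vf_id % 32)));
}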
+
+int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *vfs_to_ack)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_FUNC);
+ u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
+ u32 func_addr = SECTION_ADDR(mfw_func_offsize,
+ MCP_PF_ID(p_hwfn));
+ struct qed_mcp_mb_params mb_params;
+ union drv_union_data union_data;
+ int rc;
+ int i;
+
+ for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+ DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
+ "Acking VFs [%08x,...,%08x] - %08x\n",
+ i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);
+
+ memset(&mb_params, 0, sizeof(mb_params));
+ mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
+ memcpy(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);
+ mb_params.p_data_src = &union_data;
+ rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
+ return -EBUSY;
+ }
+
+ /* Clear the ACK bits */
+ for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+ qed_wr(p_hwfn, p_ptt,
+ func_addr +
+ offsetof(struct public_func, drv_ack_vf_disabled) +
+ i * sizeof(u32), 0);
+
+ return rc;
+}
+
static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
@@ -459,9 +531,9 @@ static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
transceiver_data)));
transceiver_state = GET_FIELD(transceiver_state,
- PMM_TRANSCEIVER_STATE);
+ ETH_TRANSCEIVER_STATE);
- if (transceiver_state == PMM_TRANSCEIVER_STATE_PRESENT)
+ if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
DP_NOTICE(p_hwfn, "Transceiver is present.\n");
else
DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
@@ -472,6 +544,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
bool b_reset)
{
struct qed_mcp_link_state *p_link;
+ u8 max_bw, min_bw;
u32 status = 0;
p_link = &p_hwfn->mcp_info->link_output;
@@ -527,17 +600,20 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
p_link->speed = 0;
}
- /* Correct speed according to bandwidth allocation */
- if (p_hwfn->mcp_info->func_info.bandwidth_max && p_link->speed) {
- p_link->speed = p_link->speed *
- p_hwfn->mcp_info->func_info.bandwidth_max /
- 100;
- qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
- p_link->speed);
- DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
- "Configured MAX bandwidth to be %08x Mb/sec\n",
- p_link->speed);
- }
+ if (p_link->link_up && p_link->speed)
+ p_link->line_speed = p_link->speed;
+ else
+ p_link->line_speed = 0;
+
+ max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
+ min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;
+
+ /* Max bandwidth configuration */
+ __qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);
+
+ /* Min bandwidth configuration */
+ __qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
+ qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_link->min_pf_rate);
p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
p_link->an_complete = !!(status &
@@ -592,14 +668,12 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
qed_link_update(p_hwfn);
}
-int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- bool b_up)
+int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
{
struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
struct qed_mcp_mb_params mb_params;
union drv_union_data union_data;
- struct pmm_phy_cfg *phy_cfg;
+ struct eth_phy_cfg *phy_cfg;
int rc = 0;
u32 cmd;
@@ -609,9 +683,9 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
if (!params->speed.autoneg)
phy_cfg->speed = params->speed.forced_speed;
- phy_cfg->pause |= (params->pause.autoneg) ? PMM_PAUSE_AUTONEG : 0;
- phy_cfg->pause |= (params->pause.forced_rx) ? PMM_PAUSE_RX : 0;
- phy_cfg->pause |= (params->pause.forced_tx) ? PMM_PAUSE_TX : 0;
+ phy_cfg->pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
+ phy_cfg->pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
+ phy_cfg->pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
phy_cfg->adv_speed = params->speed.advertised_speeds;
phy_cfg->loopback_mode = params->loopback_mode;
@@ -648,6 +722,105 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
return 0;
}
+static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
+ struct public_func *p_shmem_info)
+{
+ struct qed_mcp_function_info *p_info;
+
+ p_info = &p_hwfn->mcp_info->func_info;
+
+ p_info->bandwidth_min = (p_shmem_info->config &
+ FUNC_MF_CFG_MIN_BW_MASK) >>
+ FUNC_MF_CFG_MIN_BW_SHIFT;
+ if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
+ DP_INFO(p_hwfn,
+ "bandwidth minimum out of bounds [%02x]. Set to 1\n",
+ p_info->bandwidth_min);
+ p_info->bandwidth_min = 1;
+ }
+
+ p_info->bandwidth_max = (p_shmem_info->config &
+ FUNC_MF_CFG_MAX_BW_MASK) >>
+ FUNC_MF_CFG_MAX_BW_SHIFT;
+ if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
+ DP_INFO(p_hwfn,
+ "bandwidth maximum out of bounds [%02x]. Set to 100\n",
+ p_info->bandwidth_max);
+ p_info->bandwidth_max = 100;
+ }
+}
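
For reference, a minimal standalone sketch of the mask-and-shift extraction and clamping performed by qed_read_pf_bandwidth() above; the CFG_* layout below is a hypothetical placeholder, not the real FUNC_MF_CFG_* constants.

/* Standalone sketch; placeholder field layout, not the shmem one. */
#include <stdint.h>
#include <stdio.h>

#define CFG_MIN_BW_MASK		0x000000ffu
#define CFG_MIN_BW_SHIFT	0
#define CFG_MAX_BW_MASK		0x0000ff00u
#define CFG_MAX_BW_SHIFT	8

/* Clamp to [1, 100] with a fallback default, as the driver does. */
static uint8_t clamp_bw(uint32_t raw, uint8_t def)
{
	return (raw < 1 || raw > 100) ? def : (uint8_t)raw;
}

int main(void)
{
	uint32_t config = (75u << CFG_MAX_BW_SHIFT) | 10u;
	uint8_t min_bw = clamp_bw((config & CFG_MIN_BW_MASK) >>
				  CFG_MIN_BW_SHIFT, 1);
	uint8_t max_bw = clamp_bw((config & CFG_MAX_BW_MASK) >>
				  CFG_MAX_BW_SHIFT, 100);

	printf("min %u%%, max %u%%\n", min_bw, max_bw); /* min 10%, max 75% */
	return 0;
}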
+
+static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct public_func *p_data,
+ int pfid)
+{
+ u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+ PUBLIC_FUNC);
+ u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
+ u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
+ u32 i, size;
+
+ memset(p_data, 0, sizeof(*p_data));
+
+ size = min_t(u32, sizeof(*p_data),
+ QED_SECTION_SIZE(mfw_path_offsize));
+ for (i = 0; i < size / sizeof(u32); i++)
+ ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
+ func_addr + (i << 2));
+ return size;
+}
+
+int qed_hw_init_first_eth(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *p_pf)
+{
+ struct public_func shmem_info;
+ int i;
+
+ /* Find first Ethernet interface in port */
+ for (i = 0; i < NUM_OF_ENG_PFS(p_hwfn->cdev);
+ i += p_hwfn->cdev->num_ports_in_engines) {
+ qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+ MCP_PF_ID_BY_REL(p_hwfn, i));
+
+ if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
+ continue;
+
+ if ((shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK) ==
+ FUNC_MF_CFG_PROTOCOL_ETHERNET) {
+ *p_pf = (u8)i;
+ return 0;
+ }
+ }
+
+ DP_NOTICE(p_hwfn,
+ "Failed to find on port an ethernet interface in MF_SI mode\n");
+
+ return -EINVAL;
+}
+
+static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_mcp_function_info *p_info;
+ struct public_func shmem_info;
+ u32 resp = 0, param = 0;
+
+ qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
+ MCP_PF_ID(p_hwfn));
+
+ qed_read_pf_bandwidth(p_hwfn, &shmem_info);
+
+ p_info = &p_hwfn->mcp_info->func_info;
+
+ qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min);
+ qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max);
+
+ /* Acknowledge the MFW */
+ qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
+ &param);
+}
+
int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt)
{
@@ -676,9 +849,27 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
case MFW_DRV_MSG_LINK_CHANGE:
qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
break;
+ case MFW_DRV_MSG_VF_DISABLED:
+ qed_mcp_handle_vf_flr(p_hwfn, p_ptt);
+ break;
+ case MFW_DRV_MSG_LLDP_DATA_UPDATED:
+ qed_dcbx_mib_update_event(p_hwfn, p_ptt,
+ QED_DCBX_REMOTE_LLDP_MIB);
+ break;
+ case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
+ qed_dcbx_mib_update_event(p_hwfn, p_ptt,
+ QED_DCBX_REMOTE_MIB);
+ break;
+ case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
+ qed_dcbx_mib_update_event(p_hwfn, p_ptt,
+ QED_DCBX_OPERATIONAL_MIB);
+ break;
case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
break;
+ case MFW_DRV_MSG_BW_UPDATE:
+ qed_mcp_update_bw(p_hwfn, p_ptt);
+ break;
default:
DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i);
rc = -EINVAL;
@@ -709,26 +900,42 @@ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
return rc;
}
-int qed_mcp_get_mfw_ver(struct qed_dev *cdev,
- u32 *p_mfw_ver)
+int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_mfw_ver, u32 *p_running_bundle_id)
{
- struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
- struct qed_ptt *p_ptt;
u32 global_offsize;
- p_ptt = qed_ptt_acquire(p_hwfn);
- if (!p_ptt)
- return -EBUSY;
+ if (IS_VF(p_hwfn->cdev)) {
+ if (p_hwfn->vf_iov_info) {
+ struct pfvf_acquire_resp_tlv *p_resp;
+
+ p_resp = &p_hwfn->vf_iov_info->acquire_resp;
+ *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
+ return 0;
+ } else {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF requested MFW version prior to ACQUIRE\n");
+ return -EINVAL;
+ }
+ }
global_offsize = qed_rd(p_hwfn, p_ptt,
- SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
- public_base,
+ SECTION_OFFSIZE_ADDR(p_hwfn->
+ mcp_info->public_base,
PUBLIC_GLOBAL));
- *p_mfw_ver = qed_rd(p_hwfn, p_ptt,
- SECTION_ADDR(global_offsize, 0) +
- offsetof(struct public_global, mfw_ver));
-
- qed_ptt_release(p_hwfn, p_ptt);
+ *p_mfw_ver = qed_rd(p_hwfn, p_ptt,
+ SECTION_ADDR(global_offsize, 0) +
+ offsetof(struct public_global, mfw_ver));
+
+ if (p_running_bundle_id != NULL) {
+ *p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
+ SECTION_ADDR(global_offsize, 0) +
+ offsetof(struct public_global,
+ running_bundle_id));
+ }
return 0;
}
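
Since qed_mcp_get_mfw_ver() now takes a hwfn and a PTT instead of a cdev, PF-side callers own the PTT lifetime. A hedged sketch of a migrated call site; qed_example_print_mfw_ver() is hypothetical and not part of this patch.

/* Sketch only: compiles in the qed driver context, not standalone. */
static int qed_example_print_mfw_ver(struct qed_hwfn *p_hwfn)
{
	u32 mfw_ver = 0, running_bundle_id = 0;
	struct qed_ptt *p_ptt;
	int rc;

	p_ptt = qed_ptt_acquire(p_hwfn);	/* caller now owns the PTT */
	if (!p_ptt)
		return -EBUSY;

	rc = qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &mfw_ver,
				 &running_bundle_id);
	qed_ptt_release(p_hwfn, p_ptt);
	if (rc)
		return rc;

	DP_INFO(p_hwfn, "MFW 0x%08x, bundle 0x%08x\n",
		mfw_ver, running_bundle_id);
	return 0;
}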
@@ -739,6 +946,9 @@ int qed_mcp_get_media_type(struct qed_dev *cdev,
struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
struct qed_ptt *p_ptt;
+ if (IS_VF(cdev))
+ return -EINVAL;
+
if (!qed_mcp_is_init(p_hwfn)) {
DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
return -EBUSY;
@@ -758,28 +968,6 @@ int qed_mcp_get_media_type(struct qed_dev *cdev,
return 0;
}
-static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct public_func *p_data,
- int pfid)
-{
- u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
- PUBLIC_FUNC);
- u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
- u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
- u32 i, size;
-
- memset(p_data, 0, sizeof(*p_data));
-
- size = min_t(u32, sizeof(*p_data),
- QED_SECTION_SIZE(mfw_path_offsize));
- for (i = 0; i < size / sizeof(u32); i++)
- ((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
- func_addr + (i << 2));
-
- return size;
-}
-
static int
qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
struct public_func *p_info,
@@ -789,7 +977,18 @@ qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
case FUNC_MF_CFG_PROTOCOL_ETHERNET:
- *p_proto = QED_PCI_ETH;
+ if (test_bit(QED_DEV_CAP_ROCE,
+ &p_hwfn->hw_info.device_capabilities))
+ *p_proto = QED_PCI_ETH_ROCE;
+ else
+ *p_proto = QED_PCI_ETH;
+ break;
+ case FUNC_MF_CFG_PROTOCOL_ISCSI:
+ *p_proto = QED_PCI_ISCSI;
+ break;
+ case FUNC_MF_CFG_PROTOCOL_ROCE:
+ DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n");
+ rc = -EINVAL;
break;
default:
rc = -EINVAL;
@@ -818,26 +1017,7 @@ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
-
- info->bandwidth_min = (shmem_info.config &
- FUNC_MF_CFG_MIN_BW_MASK) >>
- FUNC_MF_CFG_MIN_BW_SHIFT;
- if (info->bandwidth_min < 1 || info->bandwidth_min > 100) {
- DP_INFO(p_hwfn,
- "bandwidth minimum out of bounds [%02x]. Set to 1\n",
- info->bandwidth_min);
- info->bandwidth_min = 1;
- }
-
- info->bandwidth_max = (shmem_info.config &
- FUNC_MF_CFG_MAX_BW_MASK) >>
- FUNC_MF_CFG_MAX_BW_SHIFT;
- if (info->bandwidth_max < 1 || info->bandwidth_max > 100) {
- DP_INFO(p_hwfn,
- "bandwidth maximum out of bounds [%02x]. Set to 100\n",
- info->bandwidth_max);
- info->bandwidth_max = 100;
- }
+ qed_read_pf_bandwidth(p_hwfn, &shmem_info);
if (shmem_info.mac_upper || shmem_info.mac_lower) {
info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
@@ -914,6 +1094,9 @@ int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
{
u32 flash_size;
+ if (IS_VF(p_hwfn->cdev))
+ return -EINVAL;
+
flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
@@ -924,6 +1107,37 @@ int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
return 0;
}
+int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 vf_id, u8 num)
+{
+ u32 resp = 0, param = 0, rc_param = 0;
+ int rc;
+
+ /* Only the leader hwfn can configure MSI-X, and it needs to take CMT into account */
+ if (!IS_LEAD_HWFN(p_hwfn))
+ return 0;
+ num *= p_hwfn->cdev->num_hwfns;
+
+ param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
+ DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
+ param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
+ DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
+ &resp, &rc_param);
+
+ if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
+ DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id);
+ rc = -EINVAL;
+ } else {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
+ num, vf_id);
+ }
+
+ return rc;
+}
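
The mailbox parameter packs the VF id and the CMT-scaled SB count into one dword via mask/shift pairs. A standalone sketch of that packing, with a placeholder field layout standing in for the DRV_MB_PARAM_CFG_VF_MSIX_* constants.

/* Standalone sketch; hypothetical field layout. */
#include <stdint.h>
#include <assert.h>

#define VF_ID_SHIFT	0
#define VF_ID_MASK	0x000000ffu
#define SB_NUM_SHIFT	8
#define SB_NUM_MASK	0x0000ff00u

static uint32_t pack_vf_msix(uint8_t vf_id, uint8_t num, int num_hwfns)
{
	uint32_t param = 0;

	num *= num_hwfns;	/* CMT: account for both hwfns */
	param |= ((uint32_t)vf_id << VF_ID_SHIFT) & VF_ID_MASK;
	param |= ((uint32_t)num << SB_NUM_SHIFT) & SB_NUM_MASK;
	return param;
}

int main(void)
{
	/* VF 3 asking for 4 SBs on a 2-hwfn (CMT) device => 8 SBs */
	assert(pack_vf_msix(3, 4, 2) == ((8u << SB_NUM_SHIFT) | 3u));
	return 0;
}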
+
int
qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -938,9 +1152,10 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
p_drv_version = &union_data.drv_version;
p_drv_version->version = p_ver->version;
- for (i = 0; i < MCP_DRV_VER_STR_SIZE - 1; i += 4) {
- val = cpu_to_be32(p_ver->name[i]);
- *(u32 *)&p_drv_version->name[i * sizeof(u32)] = val;
+
+ for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
+ val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
+ *(__be32 *)&p_drv_version->name[i * sizeof(u32)] = val;
}
memset(&mb_params, 0, sizeof(mb_params));
@@ -979,3 +1194,45 @@ int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
return rc;
}
+
+int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 drv_mb_param = 0, rsp, param;
+ int rc = 0;
+
+ drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
+ DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
+ drv_mb_param, &rsp, &param);
+
+ if (rc)
+ return rc;
+
+ if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
+ (param != DRV_MB_PARAM_BIST_RC_PASSED))
+ rc = -EAGAIN;
+
+ return rc;
+}
+
+int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 drv_mb_param, rsp, param;
+ int rc = 0;
+
+ drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
+ DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);
+
+ rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
+ drv_mb_param, &rsp, &param);
+
+ if (rc)
+ return rc;
+
+ if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
+ (param != DRV_MB_PARAM_BIST_RC_PASSED))
+ rc = -EAGAIN;
+
+ return rc;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 50917a2131a5..7f319aa1b229 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -40,7 +40,15 @@ struct qed_mcp_link_capabilities {
struct qed_mcp_link_state {
bool link_up;
- u32 speed; /* In Mb/s */
+ u32 min_pf_rate;
+
+ /* Actual link speed in Mb/s */
+ u32 line_speed;
+
+ /* PF max speed in Mb/s, deduced from line_speed
+ * according to PF max bandwidth configuration.
+ */
+ u32 speed;
bool full_duplex;
bool an;
@@ -141,13 +149,16 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
/**
* @brief Get the management firmware version value
*
- * @param cdev - qed dev pointer
- * @param mfw_ver - mfw version value
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_mfw_ver - mfw version value
+ * @param p_running_bundle_id - image id in nvram; Optional.
*
- * @return int - 0 - operation was successul.
+ * @return int - 0 - operation was successful.
*/
-int qed_mcp_get_mfw_ver(struct qed_dev *cdev,
- u32 *mfw_ver);
+int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *p_mfw_ver, u32 *p_running_bundle_id);
/**
* @brief Get media type value of the port.
@@ -237,6 +248,28 @@ int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_led_mode mode);
+/**
+ * @brief Bist register test
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
+/**
+ * @brief Bist clock test
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ *
+ * @return int - 0 - operation was successful.
+ */
+int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt);
+
/* Using hwfn number (and not pf_num) is required since in CMT mode,
* same pf_num may be used by two different hwfn
* TODO - this shouldn't really be in .h file, but until all fields
@@ -360,6 +393,18 @@ void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
+ * @brief Ack to mfw that driver finished FLR process for VFs
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vfs_to_ack - bit mask of all engine VFs for which the PF acks.
+ *
+ * @return int - 0 upon success.
+ */
+int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *vfs_to_ack);
+
+/**
* @brief - calls during init to read shmem of all function-related info.
*
* @param p_hwfn
@@ -389,4 +434,30 @@ int qed_mcp_reset(struct qed_hwfn *p_hwfn,
*/
bool qed_mcp_is_init(struct qed_hwfn *p_hwfn);
+/**
+ * @brief request MFW to configure MSI-X for a VF
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vf_id - absolute inside engine
+ * @param num_sbs - number of entries to request
+ *
+ * @return int
+ */
+int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 vf_id, u8 num);
+
+int qed_configure_pf_min_bandwidth(struct qed_dev *cdev, u8 min_bw);
+int qed_configure_pf_max_bandwidth(struct qed_dev *cdev, u8 max_bw);
+int __qed_configure_pf_max_bandwidth(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_link_state *p_link,
+ u8 max_bw);
+int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_mcp_link_state *p_link,
+ u8 min_bw);
+
+int qed_hw_init_first_eth(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 *p_pf);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index c15b1622e636..f6b86ca1ff79 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -27,6 +27,35 @@
#define CDU_REG_CID_ADDR_PARAMS_NCIB ( \
0xff << 24)
+#define CDU_REG_SEGMENT0_PARAMS \
+ 0x580904UL
+#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK \
+ (0xfff << 0)
+#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT \
+ 0
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE \
+ (0xff << 16)
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT \
+ 16
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE \
+ (0xff << 24)
+#define CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT \
+ 24
+#define CDU_REG_SEGMENT1_PARAMS \
+ 0x580908UL
+#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK \
+ (0xfff << 0)
+#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT \
+ 0
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE \
+ (0xff << 16)
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT \
+ 16
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE \
+ (0xff << 24)
+#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT \
+ 24
+
#define XSDM_REG_OPERATION_GEN \
0xf80408UL
#define NIG_REG_RX_BRB_OUT_EN \
@@ -39,6 +68,10 @@
0x2aae04UL
#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER \
0x2aa16cUL
+#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR \
+ 0x2aa118UL
+#define PSWHST_REG_ZONE_PERMISSION_TABLE \
+ 0x2a0800UL
#define BAR0_MAP_REG_MSDM_RAM \
0x1d00000UL
#define BAR0_MAP_REG_USDM_RAM \
@@ -47,6 +80,8 @@
0x1f00000UL
#define BAR0_MAP_REG_TSDM_RAM \
0x1c80000UL
+#define BAR0_MAP_REG_XSDM_RAM \
+ 0x1e00000UL
#define NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF \
0x5011f4UL
#define PRS_REG_SEARCH_TCP \
@@ -77,6 +112,8 @@
0x2f2eb0UL
#define DORQ_REG_PF_DB_ENABLE \
0x100508UL
+#define DORQ_REG_VF_USAGE_CNT \
+ 0x1009c4UL
#define QM_REG_PF_EN \
0x2f2ea4UL
#define TCFC_REG_STRONG_ENABLE_PF \
@@ -111,6 +148,8 @@
0x009778UL
#define MISCS_REG_CHIP_METAL \
0x009774UL
+#define MISCS_REG_FUNCTION_HIDE \
+ 0x0096f0UL
#define BRB_REG_HEADER_SIZE \
0x340804UL
#define BTB_REG_HEADER_SIZE \
@@ -119,6 +158,8 @@
0x1c0708UL
#define CCFC_REG_ACTIVITY_COUNTER \
0x2e8800UL
+#define CCFC_REG_STRONG_ENABLE_VF \
+ 0x2e070cUL
#define CDU_REG_CID_ADDR_PARAMS \
0x580900UL
#define DBG_REG_CLIENT_ENABLE \
@@ -157,10 +198,18 @@
0x1800004UL
#define NIG_REG_CM_HDR \
0x500840UL
+#define NIG_REG_LLH_TAGMAC_DEF_PF_VECTOR \
+ 0x50196cUL
+#define NIG_REG_LLH_CLS_TYPE_DUALMODE \
+ 0x501964UL
#define NCSI_REG_CONFIG \
0x040200UL
#define PBF_REG_INIT \
0xd80000UL
+#define PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 \
+ 0xd806c8UL
+#define PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 \
+ 0xd806ccUL
#define PTU_REG_ATC_INIT_ARRAY \
0x560000UL
#define PCM_REG_INIT \
@@ -205,6 +254,10 @@
0x230000UL
#define PRS_REG_SOFT_RST \
0x1f0000UL
+#define PRS_REG_MSG_INFO \
+ 0x1f0a1cUL
+#define PRS_REG_ROCE_DEST_QP_MAX_PF \
+ 0x1f0430UL
#define PSDM_REG_ENABLE_IN1 \
0xfa0004UL
#define PSEM_REG_ENABLE_IN \
@@ -213,6 +266,8 @@
0x280020UL
#define PSWRQ2_REG_CDUT_P_SIZE \
0x24000cUL
+#define PSWRQ2_REG_ILT_MEMORY \
+ 0x260000UL
#define PSWHST_REG_DISCARD_INTERNAL_WRITES \
0x2a0040UL
#define PSWHST2_REG_DBGSYN_ALMOST_FULL_THR \
@@ -385,6 +440,8 @@
0x1d0000UL
#define IGU_REG_PF_CONFIGURATION \
0x180800UL
+#define IGU_REG_VF_CONFIGURATION \
+ 0x180804UL
#define MISC_REG_AEU_ENABLE1_IGU_OUT_0 \
0x00849cUL
#define MISC_REG_AEU_AFTER_INVERT_1_IGU \
@@ -411,6 +468,10 @@
0x1 << 0)
#define IGU_REG_MAPPING_MEMORY \
0x184000UL
+#define IGU_REG_STATISTIC_NUM_VF_MSG_SENT \
+ 0x180408UL
+#define IGU_REG_WRITE_DONE_PENDING \
+ 0x180900UL
#define MISCS_REG_GENERIC_POR_0 \
0x0096d4UL
#define MCP_REG_NVM_CFG4 \
@@ -427,4 +488,37 @@
0x2aae60UL
#define PGLUE_B_REG_PF_BAR1_SIZE \
0x2aae64UL
+#define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL
+#define PRS_REG_GRE_PROTOCOL 0x1f0734UL
+#define PRS_REG_VXLAN_PORT 0x1f0738UL
+#define PRS_REG_OUTPUT_FORMAT_4_0 0x1f099cUL
+#define NIG_REG_ENC_TYPE_ENABLE 0x501058UL
+
+#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE (0x1 << 0)
+#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT 0
+#define NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE (0x1 << 1)
+#define NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT 1
+#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE (0x1 << 2)
+#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT 2
+
+#define NIG_REG_VXLAN_CTRL 0x50105cUL
+#define PBF_REG_VXLAN_PORT 0xd80518UL
+#define PBF_REG_NGE_PORT 0xd8051cUL
+#define PRS_REG_NGE_PORT 0x1f086cUL
+#define NIG_REG_NGE_PORT 0x508b38UL
+
+#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN 0x10090cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN 0x100910UL
+#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN 0x100914UL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN 0x10092cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN 0x100930UL
+
+#define NIG_REG_NGE_IP_ENABLE 0x508b28UL
+#define NIG_REG_NGE_ETH_ENABLE 0x508b2cUL
+#define NIG_REG_NGE_COMP_VER 0x508b30UL
+#define PBF_REG_NGE_COMP_VER 0xd80524UL
+#define PRS_REG_NGE_COMP_VER 0x1f0878UL
+
+#define QM_REG_WFQPFWEIGHT 0x2f4e80UL
+#define QM_REG_WFQVPWEIGHT 0x2fa000UL
#endif
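
The new MASK/SHIFT define pairs are intended for read-modify-write register accesses. A hedged kernel-context sketch of toggling one such bit, in the style of the qed_set_*_enable() helpers this series uses; the function name is illustrative only.

/* Sketch only: read-modify-write of a single enable bit. */
static void qed_example_set_vxlan_enable(struct qed_hwfn *p_hwfn,
					 struct qed_ptt *p_ptt, bool enable)
{
	u32 reg = qed_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);

	reg &= ~NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE;
	reg |= (enable ? 1 : 0) <<
	       NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT;
	qed_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg);
}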
diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.c b/drivers/net/ethernet/qlogic/qed/qed_selftest.c
new file mode 100644
index 000000000000..a342bfe4280d
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.c
@@ -0,0 +1,76 @@
+#include "qed.h"
+#include "qed_dev_api.h"
+#include "qed_mcp.h"
+#include "qed_sp.h"
+
+int qed_selftest_memory(struct qed_dev *cdev)
+{
+ int rc = 0, i;
+
+ for_each_hwfn(cdev, i) {
+ rc = qed_sp_heartbeat_ramrod(&cdev->hwfns[i]);
+ if (rc)
+ return rc;
+ }
+
+ return rc;
+}
+
+int qed_selftest_interrupt(struct qed_dev *cdev)
+{
+ int rc = 0, i;
+
+ for_each_hwfn(cdev, i) {
+ rc = qed_sp_heartbeat_ramrod(&cdev->hwfns[i]);
+ if (rc)
+ return rc;
+ }
+
+ return rc;
+}
+
+int qed_selftest_register(struct qed_dev *cdev)
+{
+ struct qed_hwfn *p_hwfn;
+ struct qed_ptt *p_ptt;
+ int rc = 0, i;
+
+ /* although performed by MCP, this test is per engine */
+ for_each_hwfn(cdev, i) {
+ p_hwfn = &cdev->hwfns[i];
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "failed to acquire ptt\n");
+ return -EBUSY;
+ }
+ rc = qed_mcp_bist_register_test(p_hwfn, p_ptt);
+ qed_ptt_release(p_hwfn, p_ptt);
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
+
+int qed_selftest_clock(struct qed_dev *cdev)
+{
+ struct qed_hwfn *p_hwfn;
+ struct qed_ptt *p_ptt;
+ int rc = 0, i;
+
+ /* although performed by MCP, this test is per engine */
+ for_each_hwfn(cdev, i) {
+ p_hwfn = &cdev->hwfns[i];
+ p_ptt = qed_ptt_acquire(p_hwfn);
+ if (!p_ptt) {
+ DP_ERR(p_hwfn, "failed to acquire ptt\n");
+ return -EBUSY;
+ }
+ rc = qed_mcp_bist_clock_test(p_hwfn, p_ptt);
+ qed_ptt_release(p_hwfn, p_ptt);
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
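
Each selftest entry point returns 0 on pass and a negative errno on failure. A hypothetical sketch (not part of this patch) of how an ethtool-style self-test handler might fold these results into a per-test buffer.

/* Illustrative glue; enum and function names are made up. */
enum example_test { EX_TEST_REGISTER, EX_TEST_CLOCK, EX_TEST_MAX };

static void example_run_selftests(struct qed_dev *cdev, u64 *buf)
{
	/* ethtool convention: non-zero buffer entry means "failed" */
	buf[EX_TEST_REGISTER] = qed_selftest_register(cdev) ? 1 : 0;
	buf[EX_TEST_CLOCK] = qed_selftest_clock(cdev) ? 1 : 0;
}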
diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.h b/drivers/net/ethernet/qlogic/qed/qed_selftest.h
new file mode 100644
index 000000000000..50eb0b49950f
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.h
@@ -0,0 +1,40 @@
+#ifndef _QED_SELFTEST_API_H
+#define _QED_SELFTEST_API_H
+#include <linux/types.h>
+
+/**
+ * @brief qed_selftest_memory - Perform memory test
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_selftest_memory(struct qed_dev *cdev);
+
+/**
+ * @brief qed_selftest_interrupt - Perform interrupt test
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_selftest_interrupt(struct qed_dev *cdev);
+
+/**
+ * @brief qed_selftest_register - Perform register test
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_selftest_register(struct qed_dev *cdev);
+
+/**
+ * @brief qed_selftest_clock - Perform clock test
+ *
+ * @param cdev
+ *
+ * @return int
+ */
+int qed_selftest_clock(struct qed_dev *cdev);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index d39f914b66ee..a548504c3420 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -52,6 +52,7 @@ int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
union ramrod_data {
struct pf_start_ramrod_data pf_start;
+ struct pf_update_ramrod_data pf_update;
struct rx_queue_start_ramrod_data rx_queue_start;
struct rx_queue_update_ramrod_data rx_queue_update;
struct rx_queue_stop_ramrod_data rx_queue_stop;
@@ -61,6 +62,35 @@ union ramrod_data {
struct vport_stop_ramrod_data vport_stop;
struct vport_update_ramrod_data vport_update;
struct vport_filter_update_ramrod_data vport_filter_update;
+
+ struct rdma_init_func_ramrod_data rdma_init_func;
+ struct rdma_close_func_ramrod_data rdma_close_func;
+ struct rdma_register_tid_ramrod_data rdma_register_tid;
+ struct rdma_deregister_tid_ramrod_data rdma_deregister_tid;
+ struct roce_create_qp_resp_ramrod_data roce_create_qp_resp;
+ struct roce_create_qp_req_ramrod_data roce_create_qp_req;
+ struct roce_modify_qp_resp_ramrod_data roce_modify_qp_resp;
+ struct roce_modify_qp_req_ramrod_data roce_modify_qp_req;
+ struct roce_query_qp_resp_ramrod_data roce_query_qp_resp;
+ struct roce_query_qp_req_ramrod_data roce_query_qp_req;
+ struct roce_destroy_qp_resp_ramrod_data roce_destroy_qp_resp;
+ struct roce_destroy_qp_req_ramrod_data roce_destroy_qp_req;
+ struct rdma_create_cq_ramrod_data rdma_create_cq;
+ struct rdma_resize_cq_ramrod_data rdma_resize_cq;
+ struct rdma_destroy_cq_ramrod_data rdma_destroy_cq;
+ struct rdma_srq_create_ramrod_data rdma_create_srq;
+ struct rdma_srq_destroy_ramrod_data rdma_destroy_srq;
+ struct rdma_srq_modify_ramrod_data rdma_modify_srq;
+
+ struct iscsi_slow_path_hdr iscsi_empty;
+ struct iscsi_init_ramrod_params iscsi_init;
+ struct iscsi_spe_func_dstry iscsi_destroy;
+ struct iscsi_spe_conn_offload iscsi_conn_offload;
+ struct iscsi_conn_update_ramrod_params iscsi_conn_update;
+ struct iscsi_spe_conn_termination iscsi_conn_terminate;
+
+ struct vf_start_ramrod_data vf_start;
+ struct vf_stop_ramrod_data vf_stop;
};
#define EQ_MAX_CREDIT 0xffffffff
@@ -338,13 +368,29 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
* to the internal RAM of the UStorm by the Function Start Ramrod.
*
* @param p_hwfn
+ * @param p_tunn
* @param mode
+ * @param allow_npar_tx_switch
*
* @return int
*/
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
- enum qed_mf_mode mode);
+ struct qed_tunn_start_params *p_tunn,
+ enum qed_mf_mode mode, bool allow_npar_tx_switch);
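
A minimal sketch of a call site under the new signature; passing a NULL p_tunn skips tunnel configuration, per the early return in qed_tunn_set_pf_start_params().

/* Illustrative only; error handling elided. */
rc = qed_sp_pf_start(p_hwfn, NULL, QED_MF_DEFAULT, false);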
+
+/**
+ * @brief qed_sp_pf_update - PF Function Update Ramrod
+ *
+ * This ramrod updates function-related parameters. Every parameter can be
+ * updated independently, according to configuration flags.
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+
+int qed_sp_pf_update(struct qed_hwfn *p_hwfn);
/**
* @brief qed_sp_pf_stop - PF Function Stop Ramrod
@@ -362,4 +408,18 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
int qed_sp_pf_stop(struct qed_hwfn *p_hwfn);
+int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
+ struct qed_tunn_update_params *p_tunn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data);
+/**
+ * @brief qed_sp_heartbeat_ramrod - Send empty Ramrod
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+
+int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn);
+
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index 1c06c37d4c3d..a52f3fc051f5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -15,11 +15,13 @@
#include "qed.h"
#include <linux/qed/qed_chain.h>
#include "qed_cxt.h"
+#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
+#include "qed_sriov.h"
int qed_sp_init_request(struct qed_hwfn *p_hwfn,
struct qed_spq_entry **pp_ent,
@@ -87,8 +89,218 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
return 0;
}
+static enum tunnel_clss qed_tunn_get_clss_type(u8 type)
+{
+ switch (type) {
+ case QED_TUNN_CLSS_MAC_VLAN:
+ return TUNNEL_CLSS_MAC_VLAN;
+ case QED_TUNN_CLSS_MAC_VNI:
+ return TUNNEL_CLSS_MAC_VNI;
+ case QED_TUNN_CLSS_INNER_MAC_VLAN:
+ return TUNNEL_CLSS_INNER_MAC_VLAN;
+ case QED_TUNN_CLSS_INNER_MAC_VNI:
+ return TUNNEL_CLSS_INNER_MAC_VNI;
+ default:
+ return TUNNEL_CLSS_MAC_VLAN;
+ }
+}
+
+static void
+qed_tunn_set_pf_fix_tunn_mode(struct qed_hwfn *p_hwfn,
+ struct qed_tunn_update_params *p_src,
+ struct pf_update_tunnel_config *p_tunn_cfg)
+{
+ unsigned long cached_tunn_mode = p_hwfn->cdev->tunn_mode;
+ unsigned long update_mask = p_src->tunn_mode_update_mask;
+ unsigned long tunn_mode = p_src->tunn_mode;
+ unsigned long new_tunn_mode = 0;
+
+ if (test_bit(QED_MODE_L2GRE_TUNN, &update_mask)) {
+ if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
+ __set_bit(QED_MODE_L2GRE_TUNN, &new_tunn_mode);
+ } else {
+ if (test_bit(QED_MODE_L2GRE_TUNN, &cached_tunn_mode))
+ __set_bit(QED_MODE_L2GRE_TUNN, &new_tunn_mode);
+ }
+
+ if (test_bit(QED_MODE_IPGRE_TUNN, &update_mask)) {
+ if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
+ __set_bit(QED_MODE_IPGRE_TUNN, &new_tunn_mode);
+ } else {
+ if (test_bit(QED_MODE_IPGRE_TUNN, &cached_tunn_mode))
+ __set_bit(QED_MODE_IPGRE_TUNN, &new_tunn_mode);
+ }
+
+ if (test_bit(QED_MODE_VXLAN_TUNN, &update_mask)) {
+ if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
+ __set_bit(QED_MODE_VXLAN_TUNN, &new_tunn_mode);
+ } else {
+ if (test_bit(QED_MODE_VXLAN_TUNN, &cached_tunn_mode))
+ __set_bit(QED_MODE_VXLAN_TUNN, &new_tunn_mode);
+ }
+
+ if (p_src->update_geneve_udp_port) {
+ p_tunn_cfg->set_geneve_udp_port_flg = 1;
+ p_tunn_cfg->geneve_udp_port =
+ cpu_to_le16(p_src->geneve_udp_port);
+ }
+
+ if (test_bit(QED_MODE_L2GENEVE_TUNN, &update_mask)) {
+ if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
+ __set_bit(QED_MODE_L2GENEVE_TUNN, &new_tunn_mode);
+ } else {
+ if (test_bit(QED_MODE_L2GENEVE_TUNN, &cached_tunn_mode))
+ __set_bit(QED_MODE_L2GENEVE_TUNN, &new_tunn_mode);
+ }
+
+ if (test_bit(QED_MODE_IPGENEVE_TUNN, &update_mask)) {
+ if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
+ __set_bit(QED_MODE_IPGENEVE_TUNN, &new_tunn_mode);
+ } else {
+ if (test_bit(QED_MODE_IPGENEVE_TUNN, &cached_tunn_mode))
+ __set_bit(QED_MODE_IPGENEVE_TUNN, &new_tunn_mode);
+ }
+
+ p_src->tunn_mode = new_tunn_mode;
+}
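
Each per-tunnel block above applies the same rule: take the requested bit when its update flag is set, otherwise keep the cached bit. A condensed sketch of one application of that rule; the helper is illustrative, not part of the patch.

/* Sketch: one iteration of the update-or-keep-cached selection. */
static void example_fix_one_mode(unsigned long bit,
				 unsigned long update_mask,
				 unsigned long tunn_mode,
				 unsigned long cached_mode,
				 unsigned long *new_mode)
{
	unsigned long src = test_bit(bit, &update_mask) ? tunn_mode
							: cached_mode;

	if (test_bit(bit, &src))
		__set_bit(bit, new_mode);
}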
+
+static void
+qed_tunn_set_pf_update_params(struct qed_hwfn *p_hwfn,
+ struct qed_tunn_update_params *p_src,
+ struct pf_update_tunnel_config *p_tunn_cfg)
+{
+ unsigned long tunn_mode = p_src->tunn_mode;
+ enum tunnel_clss type;
+
+ qed_tunn_set_pf_fix_tunn_mode(p_hwfn, p_src, p_tunn_cfg);
+ p_tunn_cfg->update_rx_pf_clss = p_src->update_rx_pf_clss;
+ p_tunn_cfg->update_tx_pf_clss = p_src->update_tx_pf_clss;
+
+ type = qed_tunn_get_clss_type(p_src->tunn_clss_vxlan);
+ p_tunn_cfg->tunnel_clss_vxlan = type;
+
+ type = qed_tunn_get_clss_type(p_src->tunn_clss_l2gre);
+ p_tunn_cfg->tunnel_clss_l2gre = type;
+
+ type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgre);
+ p_tunn_cfg->tunnel_clss_ipgre = type;
+
+ if (p_src->update_vxlan_udp_port) {
+ p_tunn_cfg->set_vxlan_udp_port_flg = 1;
+ p_tunn_cfg->vxlan_udp_port = cpu_to_le16(p_src->vxlan_udp_port);
+ }
+
+ if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_l2gre = 1;
+
+ if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_ipgre = 1;
+
+ if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_vxlan = 1;
+
+ if (p_src->update_geneve_udp_port) {
+ p_tunn_cfg->set_geneve_udp_port_flg = 1;
+ p_tunn_cfg->geneve_udp_port =
+ cpu_to_le16(p_src->geneve_udp_port);
+ }
+
+ if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_l2geneve = 1;
+
+ if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_ipgeneve = 1;
+
+ type = qed_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
+ p_tunn_cfg->tunnel_clss_l2geneve = type;
+
+ type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
+ p_tunn_cfg->tunnel_clss_ipgeneve = type;
+}
+
+static void qed_set_hw_tunn_mode(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ unsigned long tunn_mode)
+{
+ u8 l2gre_enable = 0, ipgre_enable = 0, vxlan_enable = 0;
+ u8 l2geneve_enable = 0, ipgeneve_enable = 0;
+
+ if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
+ l2gre_enable = 1;
+
+ if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
+ ipgre_enable = 1;
+
+ if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
+ vxlan_enable = 1;
+
+ qed_set_gre_enable(p_hwfn, p_ptt, l2gre_enable, ipgre_enable);
+ qed_set_vxlan_enable(p_hwfn, p_ptt, vxlan_enable);
+
+ if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
+ l2geneve_enable = 1;
+
+ if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
+ ipgeneve_enable = 1;
+
+ qed_set_geneve_enable(p_hwfn, p_ptt, l2geneve_enable,
+ ipgeneve_enable);
+}
+
+static void
+qed_tunn_set_pf_start_params(struct qed_hwfn *p_hwfn,
+ struct qed_tunn_start_params *p_src,
+ struct pf_start_tunnel_config *p_tunn_cfg)
+{
+ unsigned long tunn_mode;
+ enum tunnel_clss type;
+
+ if (!p_src)
+ return;
+
+ tunn_mode = p_src->tunn_mode;
+ type = qed_tunn_get_clss_type(p_src->tunn_clss_vxlan);
+ p_tunn_cfg->tunnel_clss_vxlan = type;
+ type = qed_tunn_get_clss_type(p_src->tunn_clss_l2gre);
+ p_tunn_cfg->tunnel_clss_l2gre = type;
+ type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgre);
+ p_tunn_cfg->tunnel_clss_ipgre = type;
+
+ if (p_src->update_vxlan_udp_port) {
+ p_tunn_cfg->set_vxlan_udp_port_flg = 1;
+ p_tunn_cfg->vxlan_udp_port = cpu_to_le16(p_src->vxlan_udp_port);
+ }
+
+ if (test_bit(QED_MODE_L2GRE_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_l2gre = 1;
+
+ if (test_bit(QED_MODE_IPGRE_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_ipgre = 1;
+
+ if (test_bit(QED_MODE_VXLAN_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_vxlan = 1;
+
+ if (p_src->update_geneve_udp_port) {
+ p_tunn_cfg->set_geneve_udp_port_flg = 1;
+ p_tunn_cfg->geneve_udp_port =
+ cpu_to_le16(p_src->geneve_udp_port);
+ }
+
+ if (test_bit(QED_MODE_L2GENEVE_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_l2geneve = 1;
+
+ if (test_bit(QED_MODE_IPGENEVE_TUNN, &tunn_mode))
+ p_tunn_cfg->tx_enable_ipgeneve = 1;
+
+ type = qed_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
+ p_tunn_cfg->tunnel_clss_l2geneve = type;
+ type = qed_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
+ p_tunn_cfg->tunnel_clss_ipgeneve = type;
+}
+
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
- enum qed_mf_mode mode)
+ struct qed_tunn_start_params *p_tunn,
+ enum qed_mf_mode mode, bool allow_npar_tx_switch)
{
struct pf_start_ramrod_data *p_ramrod = NULL;
u16 sb = qed_int_get_sp_sb_id(p_hwfn);
@@ -96,6 +308,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = -EINVAL;
+ u8 page_cnt;
/* update initial eq producer */
qed_eq_prod_update(p_hwfn,
@@ -120,7 +333,7 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
p_ramrod->path_id = QED_PATH_ID(p_hwfn);
p_ramrod->dont_log_ramrods = 0;
p_ramrod->log_type_mask = cpu_to_le16(0xf);
- p_ramrod->mf_mode = mode;
+
switch (mode) {
case QED_MF_DEFAULT:
case QED_MF_NPAR:
@@ -138,21 +351,125 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
/* Place EQ address in RAMROD */
DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
p_hwfn->p_eq->chain.pbl.p_phys_table);
- p_ramrod->event_ring_num_pages = (u8)p_hwfn->p_eq->chain.page_cnt;
-
+ page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain);
+ p_ramrod->event_ring_num_pages = page_cnt;
DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
p_hwfn->p_consq->chain.pbl.p_phys_table);
- p_hwfn->hw_info.personality = PERSONALITY_ETH;
+ qed_tunn_set_pf_start_params(p_hwfn, p_tunn,
+ &p_ramrod->tunnel_config);
+
+ if (IS_MF_SI(p_hwfn))
+ p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;
+
+ switch (p_hwfn->hw_info.personality) {
+ case QED_PCI_ETH:
+ p_ramrod->personality = PERSONALITY_ETH;
+ break;
+ case QED_PCI_ISCSI:
+ p_ramrod->personality = PERSONALITY_ISCSI;
+ break;
+ case QED_PCI_ETH_ROCE:
+ p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unkown personality %d\n",
+ p_hwfn->hw_info.personality);
+ p_ramrod->personality = PERSONALITY_ETH;
+ }
+
+ if (p_hwfn->cdev->p_iov_info) {
+ struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
+
+ p_ramrod->base_vf_id = (u8) p_iov->first_vf_in_pf;
+ p_ramrod->num_vfs = (u8) p_iov->total_vfs;
+ }
+ p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
+ p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;
DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
"Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
sb, sb_index,
p_ramrod->outer_tag);
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+ if (p_tunn) {
+ qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
+ p_tunn->tunn_mode);
+ p_hwfn->cdev->tunn_mode = p_tunn->tunn_mode;
+ }
+
+ return rc;
+}
+
+int qed_sp_pf_update(struct qed_hwfn *p_hwfn)
+{
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_CB;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc)
+ return rc;
+
+ qed_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results,
+ &p_ent->ramrod.pf_update);
+
return qed_spq_post(p_hwfn, p_ent, NULL);
}
+/* Set pf update ramrod command params */
+int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
+ struct qed_tunn_update_params *p_tunn,
+ enum spq_mode comp_mode,
+ struct qed_spq_comp_cb *p_comp_data)
+{
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = comp_mode;
+ init_data.p_comp_data = p_comp_data;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc)
+ return rc;
+
+ qed_tunn_set_pf_update_params(p_hwfn, p_tunn,
+ &p_ent->ramrod.pf_update.tunnel_config);
+
+ rc = qed_spq_post(p_hwfn, p_ent, NULL);
+ if (rc)
+ return rc;
+
+ if (p_tunn->update_vxlan_udp_port)
+ qed_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+ p_tunn->vxlan_udp_port);
+ if (p_tunn->update_geneve_udp_port)
+ qed_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+ p_tunn->geneve_udp_port);
+
+ qed_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn->tunn_mode);
+ p_hwfn->cdev->tunn_mode = p_tunn->tunn_mode;
+
+ return rc;
+}
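
A hedged sketch of a runtime caller that updates only the VXLAN UDP port through the PF-update ramrod, using just the fields this patch references; the wrapper function is hypothetical.

/* Sketch only: compiles in the qed driver context, not standalone. */
static int example_update_vxlan_port(struct qed_hwfn *p_hwfn, u16 port)
{
	struct qed_tunn_update_params params;

	memset(&params, 0, sizeof(params));
	params.update_vxlan_udp_port = 1;
	params.vxlan_udp_port = port;
	params.tunn_mode = p_hwfn->cdev->tunn_mode;	/* keep current mode */

	return qed_sp_pf_update_tunn_cfg(p_hwfn, &params,
					 QED_SPQ_MODE_EBLOCK, NULL);
}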
+
int qed_sp_pf_stop(struct qed_hwfn *p_hwfn)
{
struct qed_spq_entry *p_ent = NULL;
@@ -173,3 +490,24 @@ int qed_sp_pf_stop(struct qed_hwfn *p_hwfn)
return qed_spq_post(p_hwfn, p_ent, NULL);
}
+
+int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn)
+{
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
+ &init_data);
+ if (rc)
+ return rc;
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 89469d5aae25..d73456eab1d7 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -27,6 +27,7 @@
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
+#include "qed_sriov.h"
/***************************************************************************
* Structures & Definitions
@@ -212,19 +213,15 @@ static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
DQ_XCM_CORE_SPQ_PROD_CMD);
db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
-
- /* validate producer is up to-date */
- rmb();
-
db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));
- /* do not reorder */
- barrier();
+ /* make sure the SPQE is updated before the doorbell */
+ wmb();
DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);
/* make sure the doorbell is rung */
- mmiowb();
+ wmb();
DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
"Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
@@ -242,10 +239,17 @@ static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe)
{
- DP_NOTICE(p_hwfn,
- "Unknown Async completion for protocol: %d\n",
- p_eqe->protocol_id);
- return -EINVAL;
+ switch (p_eqe->protocol_id) {
+ case PROTOCOLID_COMMON:
+ return qed_sriov_eqe_event(p_hwfn,
+ p_eqe->opcode,
+ p_eqe->echo, &p_eqe->data);
+ default:
+ DP_NOTICE(p_hwfn,
+ "Unknown Async completion for protocol: %d\n",
+ p_eqe->protocol_id);
+ return -EINVAL;
+ }
}
/***************************************************************************
@@ -335,6 +339,7 @@ struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn,
if (qed_chain_alloc(p_hwfn->cdev,
QED_CHAIN_USE_TO_PRODUCE,
QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U16,
num_elem,
sizeof(union event_ring_element),
&p_eq->chain)) {
@@ -379,6 +384,9 @@ static int qed_cqe_completion(
struct eth_slow_path_rx_cqe *cqe,
enum protocol_type protocol)
{
+ if (IS_VF(p_hwfn->cdev))
+ return 0;
+
/* @@@tmp - it's possible we'll eventually want to handle some
* actual commands that can arrive here, but for now this is only
* used to complete the ramrod using the echo value on the cqe
@@ -405,10 +413,10 @@ int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
***************************************************************************/
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
- struct qed_spq *p_spq = p_hwfn->p_spq;
- struct qed_spq_entry *p_virt = NULL;
- dma_addr_t p_phys = 0;
- unsigned int i = 0;
+ struct qed_spq *p_spq = p_hwfn->p_spq;
+ struct qed_spq_entry *p_virt = NULL;
+ dma_addr_t p_phys = 0;
+ u32 i, capacity;
INIT_LIST_HEAD(&p_spq->pending);
INIT_LIST_HEAD(&p_spq->completion_pending);
@@ -420,7 +428,8 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn)
p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
p_virt = p_spq->p_virt;
- for (i = 0; i < p_spq->chain.capacity; i++) {
+ capacity = qed_chain_get_capacity(&p_spq->chain);
+ for (i = 0; i < capacity; i++) {
DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);
list_add_tail(&p_virt->list, &p_spq->free_pool);
@@ -448,9 +457,10 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn)
int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
- struct qed_spq *p_spq = NULL;
- dma_addr_t p_phys = 0;
- struct qed_spq_entry *p_virt = NULL;
+ struct qed_spq_entry *p_virt = NULL;
+ struct qed_spq *p_spq = NULL;
+ dma_addr_t p_phys = 0;
+ u32 capacity;
/* SPQ struct */
p_spq =
@@ -464,6 +474,7 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn)
if (qed_chain_alloc(p_hwfn->cdev,
QED_CHAIN_USE_TO_PRODUCE,
QED_CHAIN_MODE_SINGLE,
+ QED_CHAIN_CNT_TYPE_U16,
0, /* N/A when the mode is SINGLE */
sizeof(struct slow_path_element),
&p_spq->chain)) {
@@ -472,11 +483,11 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn)
}
/* allocate and fill the SPQ elements (incl. ramrod data list) */
+ capacity = qed_chain_get_capacity(&p_spq->chain);
p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
- p_spq->chain.capacity *
+ capacity *
sizeof(struct qed_spq_entry),
- &p_phys,
- GFP_KERNEL);
+ &p_phys, GFP_KERNEL);
if (!p_virt)
goto spq_allocate_fail;
@@ -496,16 +507,18 @@ spq_allocate_fail:
void qed_spq_free(struct qed_hwfn *p_hwfn)
{
struct qed_spq *p_spq = p_hwfn->p_spq;
+ u32 capacity;
if (!p_spq)
return;
- if (p_spq->p_virt)
+ if (p_spq->p_virt) {
+ capacity = qed_chain_get_capacity(&p_spq->chain);
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
- p_spq->chain.capacity *
+ capacity *
sizeof(struct qed_spq_entry),
- p_spq->p_virt,
- p_spq->p_phys);
+ p_spq->p_virt, p_spq->p_phys);
+ }
qed_chain_free(p_hwfn->cdev, &p_spq->chain);
@@ -603,7 +616,9 @@ qed_spq_add_entry(struct qed_hwfn *p_hwfn,
*p_en2 = *p_ent;
- kfree(p_ent);
+ /* EBLOCK responsible to free the allocated p_ent */
+ if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
+ kfree(p_ent);
p_ent = p_en2;
}
@@ -738,6 +753,15 @@ int qed_spq_post(struct qed_hwfn *p_hwfn,
* Thus, after gaining the answer perform the cleanup here.
*/
rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);
+
+ if (p_ent->queue == &p_spq->unlimited_pending) {
+ /* This is an allocated p_ent which does not need to
+ * return to pool.
+ */
+ kfree(p_ent);
+ return rc;
+ }
+
if (rc)
goto spq_post_fail2;
@@ -791,13 +815,12 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
* in a bitmap and increasing the chain consumer only
* for the first successive completed entries.
*/
- bitmap_set(p_spq->p_comp_bitmap, pos, SPQ_RING_SIZE);
+ __set_bit(pos, p_spq->p_comp_bitmap);
while (test_bit(p_spq->comp_bitmap_idx,
p_spq->p_comp_bitmap)) {
- bitmap_clear(p_spq->p_comp_bitmap,
- p_spq->comp_bitmap_idx,
- SPQ_RING_SIZE);
+ __clear_bit(p_spq->comp_bitmap_idx,
+ p_spq->p_comp_bitmap);
p_spq->comp_bitmap_idx++;
qed_chain_return_produced(&p_spq->chain);
}
@@ -833,8 +856,12 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
fw_return_code);
- if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
- /* EBLOCK is responsible for freeing its own entry */
+ if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
+ (found->queue == &p_spq->unlimited_pending))
+ /* EBLOCK is responsible for returning its own entry into the
+ * free list, unless it originally added the entry into the
+ * unlimited pending list.
+ */
qed_spq_return_entry(p_hwfn, found);
/* Attempt to post pending requests */
@@ -860,9 +887,9 @@ struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
if (qed_chain_alloc(p_hwfn->cdev,
QED_CHAIN_USE_TO_PRODUCE,
QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U16,
QED_CHAIN_PAGE_SIZE / 0x80,
- 0x80,
- &p_consq->chain)) {
+ 0x80, &p_consq->chain)) {
DP_NOTICE(p_hwfn, "Failed to allocate consq chain");
goto consq_allocate_fail;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
new file mode 100644
index 000000000000..15399da268d9
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -0,0 +1,3859 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/crc32.h>
+#include <linux/qed/qed_iov_if.h>
+#include "qed_cxt.h"
+#include "qed_hsi.h"
+#include "qed_hw.h"
+#include "qed_init_ops.h"
+#include "qed_int.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+#include "qed_sp.h"
+#include "qed_sriov.h"
+#include "qed_vf.h"
+
+/* IOV ramrods */
+static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf)
+{
+ struct vf_start_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+ u8 fp_minor;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = p_vf->opaque_fid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_VF_START,
+ PROTOCOLID_COMMON, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.vf_start;
+
+ p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
+ p_ramrod->opaque_fid = cpu_to_le16(p_vf->opaque_fid);
+
+ switch (p_hwfn->hw_info.personality) {
+ case QED_PCI_ETH:
+ p_ramrod->personality = PERSONALITY_ETH;
+ break;
+ case QED_PCI_ETH_ROCE:
+ p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
+ break;
+ default:
+ DP_NOTICE(p_hwfn, "Unknown VF personality %d\n",
+ p_hwfn->hw_info.personality);
+ return -EINVAL;
+ }
+
+ fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
+ if (fp_minor > ETH_HSI_VER_MINOR) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF [%d] - Requested fp hsi %02x.%02x which is slightly newer than PF's %02x.%02x; Configuring PFs version\n",
+ p_vf->abs_vf_id,
+ ETH_HSI_VER_MAJOR,
+ fp_minor, ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
+ fp_minor = ETH_HSI_VER_MINOR;
+ }
+
+ p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
+ p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d] - Starting using HSI %02x.%02x\n",
+ p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
+ u32 concrete_vfid, u16 opaque_vfid)
+{
+ struct vf_stop_ramrod_data *p_ramrod = NULL;
+ struct qed_spq_entry *p_ent = NULL;
+ struct qed_sp_init_data init_data;
+ int rc = -EINVAL;
+
+ /* Get SPQ entry */
+ memset(&init_data, 0, sizeof(init_data));
+ init_data.cid = qed_spq_get_cid(p_hwfn);
+ init_data.opaque_fid = opaque_vfid;
+ init_data.comp_mode = QED_SPQ_MODE_EBLOCK;
+
+ rc = qed_sp_init_request(p_hwfn, &p_ent,
+ COMMON_RAMROD_VF_STOP,
+ PROTOCOLID_COMMON, &init_data);
+ if (rc)
+ return rc;
+
+ p_ramrod = &p_ent->ramrod.vf_stop;
+
+ p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
+
+ return qed_spq_post(p_hwfn, p_ent, NULL);
+}
+
+bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
+ int rel_vf_id, bool b_enabled_only)
+{
+ if (!p_hwfn->pf_iov_info) {
+ DP_NOTICE(p_hwfn->cdev, "No iov info\n");
+ return false;
+ }
+
+ if ((rel_vf_id >= p_hwfn->cdev->p_iov_info->total_vfs) ||
+ (rel_vf_id < 0))
+ return false;
+
+ if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
+ b_enabled_only)
+ return false;
+
+ return true;
+}
+
+static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
+ u16 relative_vf_id,
+ bool b_enabled_only)
+{
+ struct qed_vf_info *vf = NULL;
+
+ if (!p_hwfn->pf_iov_info) {
+ DP_NOTICE(p_hwfn->cdev, "No iov info\n");
+ return NULL;
+ }
+
+ if (qed_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only))
+ vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
+ else
+ DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
+ relative_vf_id);
+
+ return vf;
+}
+
+static bool qed_iov_validate_rxq(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, u16 rx_qid)
+{
+ if (rx_qid >= p_vf->num_rxqs)
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[0x%02x] - can't touch Rx queue[%04x]; Only 0x%04x are allocated\n",
+ p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
+ return rx_qid < p_vf->num_rxqs;
+}
+
+static bool qed_iov_validate_txq(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, u16 tx_qid)
+{
+ if (tx_qid >= p_vf->num_txqs)
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[0x%02x] - can't touch Tx queue[%04x]; Only 0x%04x are allocated\n",
+ p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
+ return tx_qid < p_vf->num_txqs;
+}
+
+static bool qed_iov_validate_sb(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, u16 sb_idx)
+{
+ int i;
+
+ for (i = 0; i < p_vf->num_sbs; i++)
+ if (p_vf->igu_sbs[i] == sb_idx)
+ return true;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[0%02x] - tried using sb_idx %04x which doesn't exist as one of its 0x%02x SBs\n",
+ p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);
+
+ return false;
+}
+
+int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
+ int vfid, struct qed_ptt *p_ptt)
+{
+ struct qed_bulletin_content *p_bulletin;
+ int crc_size = sizeof(p_bulletin->crc);
+ struct qed_dmae_params params;
+ struct qed_vf_info *p_vf;
+
+ p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ if (!p_vf)
+ return -EINVAL;
+
+ if (!p_vf->vf_bulletin)
+ return -EINVAL;
+
+ p_bulletin = p_vf->bulletin.p_virt;
+
+ /* Increment bulletin board version and compute crc */
+ p_bulletin->version++;
+ p_bulletin->crc = crc32(0, (u8 *)p_bulletin + crc_size,
+ p_vf->bulletin.size - crc_size);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
+ p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);
+
+ /* propagate bulletin board via dmae to vm memory */
+ memset(&params, 0, sizeof(params));
+ params.flags = QED_DMAE_FLAG_VF_DST;
+ params.dst_vfid = p_vf->abs_vf_id;
+ return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
+ p_vf->vf_bulletin, p_vf->bulletin.size / 4,
+ &params);
+}
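
The CRC covers everything in the bulletin past the crc field itself. A sketch of the matching consumer-side validation, mirroring the PF-side computation above; the helper is illustrative and not part of the patch.

/* Sketch only: recompute and compare the bulletin CRC. */
static bool example_bulletin_valid(struct qed_bulletin_content *p_bulletin,
				   u32 size)
{
	int crc_size = sizeof(p_bulletin->crc);

	return p_bulletin->crc == crc32(0, (u8 *)p_bulletin + crc_size,
					size - crc_size);
}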
+
+static int qed_iov_pci_cfg_info(struct qed_dev *cdev)
+{
+ struct qed_hw_sriov_info *iov = cdev->p_iov_info;
+ int pos = iov->pos;
+
+ DP_VERBOSE(cdev, QED_MSG_IOV, "sriov ext pos %d\n", pos);
+ pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
+
+ pci_read_config_word(cdev->pdev,
+ pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
+ pci_read_config_word(cdev->pdev,
+ pos + PCI_SRIOV_INITIAL_VF, &iov->initial_vfs);
+
+ pci_read_config_word(cdev->pdev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
+ if (iov->num_vfs) {
+ DP_VERBOSE(cdev,
+ QED_MSG_IOV,
+ "Number of VFs are already set to non-zero value. Ignoring PCI configuration value\n");
+ iov->num_vfs = 0;
+ }
+
+ pci_read_config_word(cdev->pdev,
+ pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
+
+ pci_read_config_word(cdev->pdev,
+ pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
+
+ pci_read_config_word(cdev->pdev,
+ pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);
+
+ pci_read_config_dword(cdev->pdev,
+ pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
+
+ pci_read_config_dword(cdev->pdev, pos + PCI_SRIOV_CAP, &iov->cap);
+
+ pci_read_config_byte(cdev->pdev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
+
+ DP_VERBOSE(cdev,
+ QED_MSG_IOV,
+ "IOV info: nres %d, cap 0x%x, ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d, stride %d, page size 0x%x\n",
+ iov->nres,
+ iov->cap,
+ iov->ctrl,
+ iov->total_vfs,
+ iov->initial_vfs,
+ iov->nr_virtfn, iov->offset, iov->stride, iov->pgsz);
+
+ /* Some sanity checks */
+ if (iov->num_vfs > NUM_OF_VFS(cdev) ||
+ iov->total_vfs > NUM_OF_VFS(cdev)) {
+ /* This can happen only due to a bug. In this case we set
+ * num_vfs to zero to avoid memory corruption in the code that
+ * assumes max number of vfs
+ */
+ DP_NOTICE(cdev,
+ "IOV: Unexpected number of vfs set: %d setting num_vf to zero\n",
+ iov->num_vfs);
+
+ iov->num_vfs = 0;
+ iov->total_vfs = 0;
+ }
+
+ return 0;
+}
+
+static void qed_iov_clear_vf_igu_blocks(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt)
+{
+ struct qed_igu_block *p_sb;
+ u16 sb_id;
+ u32 val;
+
+ if (!p_hwfn->hw_info.p_igu_info) {
+ DP_ERR(p_hwfn,
+ "qed_iov_clear_vf_igu_blocks IGU Info not initialized\n");
+ return;
+ }
+
+ for (sb_id = 0; sb_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev);
+ sb_id++) {
+ p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
+ if ((p_sb->status & QED_IGU_STATUS_FREE) &&
+ !(p_sb->status & QED_IGU_STATUS_PF)) {
+ val = qed_rd(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY + sb_id * 4);
+ SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
+ qed_wr(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
+ }
+ }
+}
+
+static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
+{
+ struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
+ struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
+ struct qed_bulletin_content *p_bulletin_virt;
+ dma_addr_t req_p, rply_p, bulletin_p;
+ union pfvf_tlvs *p_reply_virt_addr;
+ union vfpf_tlvs *p_req_virt_addr;
+ u8 idx = 0;
+
+ memset(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));
+
+ p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
+ req_p = p_iov_info->mbx_msg_phys_addr;
+ p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
+ rply_p = p_iov_info->mbx_reply_phys_addr;
+ p_bulletin_virt = p_iov_info->p_bulletins;
+ bulletin_p = p_iov_info->bulletins_phys;
+ if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
+ DP_ERR(p_hwfn,
+ "qed_iov_setup_vfdb called without allocating mem first\n");
+ return;
+ }
+
+ for (idx = 0; idx < p_iov->total_vfs; idx++) {
+ struct qed_vf_info *vf = &p_iov_info->vfs_array[idx];
+ u32 concrete;
+
+ vf->vf_mbx.req_virt = p_req_virt_addr + idx;
+ vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
+ vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
+ vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);
+
+ vf->state = VF_STOPPED;
+ vf->b_init = false;
+
+ vf->bulletin.phys = idx *
+ sizeof(struct qed_bulletin_content) +
+ bulletin_p;
+ vf->bulletin.p_virt = p_bulletin_virt + idx;
+ vf->bulletin.size = sizeof(struct qed_bulletin_content);
+
+ vf->relative_vf_id = idx;
+ vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
+ concrete = qed_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
+ vf->concrete_fid = concrete;
+ vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
+ (vf->abs_vf_id << 8);
+ vf->vport_id = idx + 1;
+
+ vf->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
+ vf->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
+ }
+}
+
+static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
+{
+ struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
+ void **p_v_addr;
+ u16 num_vfs = 0;
+
+ num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);
+
+ /* Allocate PF Mailbox buffer (per-VF) */
+ p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
+ p_v_addr = &p_iov_info->mbx_msg_virt_addr;
+ *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_iov_info->mbx_msg_size,
+ &p_iov_info->mbx_msg_phys_addr,
+ GFP_KERNEL);
+ if (!*p_v_addr)
+ return -ENOMEM;
+
+ /* Allocate PF Mailbox Reply buffer (per-VF) */
+ p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
+ p_v_addr = &p_iov_info->mbx_reply_virt_addr;
+ *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_iov_info->mbx_reply_size,
+ &p_iov_info->mbx_reply_phys_addr,
+ GFP_KERNEL);
+ if (!*p_v_addr)
+ return -ENOMEM;
+
+ p_iov_info->bulletins_size = sizeof(struct qed_bulletin_content) *
+ num_vfs;
+ p_v_addr = &p_iov_info->p_bulletins;
+ *p_v_addr = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_iov_info->bulletins_size,
+ &p_iov_info->bulletins_phys,
+ GFP_KERNEL);
+ if (!*p_v_addr)
+ return -ENOMEM;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
+ p_iov_info->mbx_msg_virt_addr,
+ (u64) p_iov_info->mbx_msg_phys_addr,
+ p_iov_info->mbx_reply_virt_addr,
+ (u64) p_iov_info->mbx_reply_phys_addr,
+ p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);
+
+ return 0;
+}
+
+static void qed_iov_free_vfdb(struct qed_hwfn *p_hwfn)
+{
+ struct qed_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
+
+ if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ p_iov_info->mbx_msg_size,
+ p_iov_info->mbx_msg_virt_addr,
+ p_iov_info->mbx_msg_phys_addr);
+
+ if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ p_iov_info->mbx_reply_size,
+ p_iov_info->mbx_reply_virt_addr,
+ p_iov_info->mbx_reply_phys_addr);
+
+ if (p_iov_info->p_bulletins)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ p_iov_info->bulletins_size,
+ p_iov_info->p_bulletins,
+ p_iov_info->bulletins_phys);
+}
+
+int qed_iov_alloc(struct qed_hwfn *p_hwfn)
+{
+ struct qed_pf_iov *p_sriov;
+
+ if (!IS_PF_SRIOV(p_hwfn)) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "No SR-IOV - no need for IOV db\n");
+ return 0;
+ }
+
+ p_sriov = kzalloc(sizeof(*p_sriov), GFP_KERNEL);
+ if (!p_sriov) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n");
+ return -ENOMEM;
+ }
+
+ p_hwfn->pf_iov_info = p_sriov;
+
+ return qed_iov_allocate_vfdb(p_hwfn);
+}
+
+void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
+ return;
+
+ qed_iov_setup_vfdb(p_hwfn);
+ qed_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
+}
+
+void qed_iov_free(struct qed_hwfn *p_hwfn)
+{
+ if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
+ qed_iov_free_vfdb(p_hwfn);
+ kfree(p_hwfn->pf_iov_info);
+ }
+}
+
+void qed_iov_free_hw_info(struct qed_dev *cdev)
+{
+ kfree(cdev->p_iov_info);
+ cdev->p_iov_info = NULL;
+}
+
+int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
+{
+ struct qed_dev *cdev = p_hwfn->cdev;
+ int pos;
+ int rc;
+
+ if (IS_VF(p_hwfn->cdev))
+ return 0;
+
+ /* Learn the PCI configuration */
+ pos = pci_find_ext_capability(p_hwfn->cdev->pdev,
+ PCI_EXT_CAP_ID_SRIOV);
+ if (!pos) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No PCIe IOV support\n");
+ return 0;
+ }
+
+ /* Allocate a new struct for IOV information */
+ cdev->p_iov_info = kzalloc(sizeof(*cdev->p_iov_info), GFP_KERNEL);
+ if (!cdev->p_iov_info) {
+ DP_NOTICE(p_hwfn, "Can't support IOV due to lack of memory\n");
+ return -ENOMEM;
+ }
+ cdev->p_iov_info->pos = pos;
+
+ rc = qed_iov_pci_cfg_info(cdev);
+ if (rc)
+ return rc;
+
+ /* We want PF IOV to be synonymous with the existence of p_iov_info;
+ * In case the capability is published but there are no VFs, simply
+ * de-allocate the struct.
+ */
+ if (!cdev->p_iov_info->total_vfs) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "IOV capabilities, but no VFs are published\n");
+ kfree(cdev->p_iov_info);
+ cdev->p_iov_info = NULL;
+ return 0;
+ }
+
+ /* Calculate the first VF index - this is a bit tricky; basically,
+ * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
+ * after the first engine's VFs.
+ */
+ cdev->p_iov_info->first_vf_in_pf = p_hwfn->cdev->p_iov_info->offset +
+ p_hwfn->abs_pf_id - 16;
+ if (QED_PATH_ID(p_hwfn))
+ cdev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
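+ /* E.g., assuming an SR-IOV VF offset of 16 and abs PF id 2 on the
+ * first engine: first_vf_in_pf = 16 + 2 - 16 = 2.
+ */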
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "First VF in hwfn 0x%08x\n",
+ cdev->p_iov_info->first_vf_in_pf);
+
+ return 0;
+}
+
+static bool qed_iov_pf_sanity_check(struct qed_hwfn *p_hwfn, int vfid)
+{
+ /* Check PF supports sriov */
+ if (IS_VF(p_hwfn->cdev) || !IS_QED_SRIOV(p_hwfn->cdev) ||
+ !IS_PF_SRIOV_ALLOC(p_hwfn))
+ return false;
+
+ /* Check VF validity */
+ if (!qed_iov_is_valid_vfid(p_hwfn, vfid, true))
+ return false;
+
+ return true;
+}
+
+static void qed_iov_set_vf_to_disable(struct qed_dev *cdev,
+ u16 rel_vf_id, u8 to_disable)
+{
+ struct qed_vf_info *vf;
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+ if (!vf)
+ continue;
+
+ vf->to_disable = to_disable;
+ }
+}
+
+void qed_iov_set_vfs_to_disable(struct qed_dev *cdev, u8 to_disable)
+{
+ u16 i;
+
+ if (!IS_QED_SRIOV(cdev))
+ return;
+
+ for (i = 0; i < cdev->p_iov_info->total_vfs; i++)
+ qed_iov_set_vf_to_disable(cdev, i, to_disable);
+}
+
+static void qed_iov_vf_pglue_clear_err(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u8 abs_vfid)
+{
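+ /* Each PGLUE WAS_ERROR register covers 32 VFs: (abs_vfid >> 5)
+ * selects the 32-bit register, (abs_vfid & 0x1f) the bit within it.
+ */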
+ qed_wr(p_hwfn, p_ptt,
+ PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
+ 1 << (abs_vfid & 0x1f));
+}
+
+static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, struct qed_vf_info *vf)
+{
+ int i;
+
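+ /* qed_fid_pretend() makes subsequent register accesses execute on
+ * behalf of the given function; pretend to be the VF, program its
+ * IGU state, then switch back to the PF's own concrete fid.
+ */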
+ /* Set VF masks and configuration - pretend */
+ qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
+
+ qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);
+
+ /* unpretend */
+ qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+
+ /* iterate over all queues, clear sb consumer */
+ for (i = 0; i < vf->num_sbs; i++)
+ qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
+ vf->igu_sbs[i],
+ vf->opaque_fid, true);
+}
+
+static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf, bool enable)
+{
+ u32 igu_vf_conf;
+
+ qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
+
+ igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);
+
+ if (enable)
+ igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
+ else
+ igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;
+
+ qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);
+
+ /* unpretend */
+ qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+}
+
+static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
+ int rc;
+
+ if (vf->to_disable)
+ return 0;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "Enable internal access for vf %x [abs %x]\n",
+ vf->abs_vf_id, QED_VF_ABS_ID(p_hwfn, vf));
+
+ qed_iov_vf_pglue_clear_err(p_hwfn, p_ptt, QED_VF_ABS_ID(p_hwfn, vf));
+
+ qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
+
+ rc = qed_mcp_config_vf_msix(p_hwfn, p_ptt, vf->abs_vf_id, vf->num_sbs);
+ if (rc)
+ return rc;
+
+ qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
+
+ SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
+ STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);
+
+ qed_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
+ p_hwfn->hw_info.hw_mode);
+
+ /* unpretend */
+ qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+
+ vf->state = VF_FREE;
+
+ return rc;
+}
+
+/**
+ * @brief qed_iov_config_perm_table - configure the permission
+ * zone table.
+ * In E4, queue zone permission table size is 320x9. There
+ * are 320 VF queues for single engine device (256 for dual
+ * engine device), and each entry has the following format:
+ * {Valid, VF[7:0]}
+ * @param p_hwfn
+ * @param p_ptt
+ * @param vf
+ * @param enable
+ */
+static void qed_iov_config_perm_table(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf, u8 enable)
+{
+ u32 reg_addr, val;
+ u16 qzone_id = 0;
+ int qid;
+
+ for (qid = 0; qid < vf->num_rxqs; qid++) {
+ qed_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
+ &qzone_id);
+
+ reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
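+ /* Entry format is {Valid, VF[7:0]}: bit 8 is the valid bit and
+ * the low byte holds the absolute VF id.
+ */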
+ val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
+ qed_wr(p_hwfn, p_ptt, reg_addr, val);
+ }
+}
+
+static void qed_iov_enable_vf_traffic(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ /* Reset vf in IGU - interrupts are still disabled */
+ qed_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
+
+ qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);
+
+ /* Permission Table */
+ qed_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
+}
+
+static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf, u16 num_rx_queues)
+{
+ struct qed_igu_block *igu_blocks;
+ int qid = 0, igu_id = 0;
+ u32 val = 0;
+
+ igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;
+
+ if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
+ num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
+ p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;
+
+ SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
+ SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
+ SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);
+
+ while ((qid < num_rx_queues) &&
+ (igu_id < QED_MAPPING_MEMORY_SIZE(p_hwfn->cdev))) {
+ if (igu_blocks[igu_id].status & QED_IGU_STATUS_FREE) {
+ struct cau_sb_entry sb_entry;
+
+ vf->igu_sbs[qid] = (u16)igu_id;
+ igu_blocks[igu_id].status &= ~QED_IGU_STATUS_FREE;
+
+ SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
+
+ qed_wr(p_hwfn, p_ptt,
+ IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
+ val);
+
+ /* Configure in CAU the IGU SB that was just marked valid */
+ qed_init_cau_sb_entry(p_hwfn, &sb_entry,
+ p_hwfn->rel_pf_id,
+ vf->abs_vf_id, 1);
+ qed_dmae_host2grc(p_hwfn, p_ptt,
+ (u64)(uintptr_t)&sb_entry,
+ CAU_REG_SB_VAR_MEMORY +
+ igu_id * sizeof(u64), 2, 0);
+ qid++;
+ }
+ igu_id++;
+ }
+
+ vf->num_sbs = (u8) num_rx_queues;
+
+ return vf->num_sbs;
+}
+
+static void qed_iov_free_vf_igu_sbs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ struct qed_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
+ int idx, igu_id;
+ u32 addr, val;
+
+ /* Invalidate igu CAM lines and mark them as free */
+ for (idx = 0; idx < vf->num_sbs; idx++) {
+ igu_id = vf->igu_sbs[idx];
+ addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;
+
+ val = qed_rd(p_hwfn, p_ptt, addr);
+ SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
+ qed_wr(p_hwfn, p_ptt, addr, val);
+
+ p_info->igu_map.igu_blocks[igu_id].status |=
+ QED_IGU_STATUS_FREE;
+
+ p_hwfn->hw_info.p_igu_info->free_blks++;
+ }
+
+ vf->num_sbs = 0;
+}
+
+static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 rel_vf_id, u16 num_rx_queues)
+{
+ u8 num_of_vf_available_chains = 0;
+ struct qed_vf_info *vf = NULL;
+ int rc = 0;
+ u32 cids;
+ u8 i;
+
+ vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+ if (!vf) {
+ DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
+ return -EINVAL;
+ }
+
+ if (vf->b_init) {
+ DP_NOTICE(p_hwfn, "VF[%d] is already active.\n", rel_vf_id);
+ return -EINVAL;
+ }
+
+ /* Limit number of queues according to number of CIDs */
+ qed_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d] - requesting to initialize for 0x%04x queues [0x%04x CIDs available]\n",
+ vf->relative_vf_id, num_rx_queues, (u16) cids);
+ num_rx_queues = min_t(u16, num_rx_queues, ((u16) cids));
+
+ num_of_vf_available_chains = qed_iov_alloc_vf_igu_sbs(p_hwfn,
+ p_ptt,
+ vf,
+ num_rx_queues);
+ if (!num_of_vf_available_chains) {
+ DP_ERR(p_hwfn, "no available igu sbs\n");
+ return -ENOMEM;
+ }
+
+ /* Choose queue number and index ranges */
+ vf->num_rxqs = num_of_vf_available_chains;
+ vf->num_txqs = num_of_vf_available_chains;
+
+ for (i = 0; i < vf->num_rxqs; i++) {
+ u16 queue_id = qed_int_queue_id_from_sb_id(p_hwfn,
+ vf->igu_sbs[i]);
+
+ if (queue_id > RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
+ DP_NOTICE(p_hwfn,
+ "VF[%d] will require utilizing of out-of-bounds queues - %04x\n",
+ vf->relative_vf_id, queue_id);
+ return -EINVAL;
+ }
+
+ /* CIDs are per-VF, so no problem having them 0-based. */
+ vf->vf_queues[i].fw_rx_qid = queue_id;
+ vf->vf_queues[i].fw_tx_qid = queue_id;
+ vf->vf_queues[i].fw_cid = i;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n",
+ vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i);
+ }
+ rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, vf);
+ if (!rc) {
+ vf->b_init = true;
+
+ if (IS_LEAD_HWFN(p_hwfn))
+ p_hwfn->cdev->p_iov_info->num_vfs++;
+ }
+
+ return rc;
+}
+
+static void qed_iov_set_link(struct qed_hwfn *p_hwfn,
+ u16 vfid,
+ struct qed_mcp_link_params *params,
+ struct qed_mcp_link_state *link,
+ struct qed_mcp_link_capabilities *p_caps)
+{
+ struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
+ vfid,
+ false);
+ struct qed_bulletin_content *p_bulletin;
+
+ if (!p_vf)
+ return;
+
+ p_bulletin = p_vf->bulletin.p_virt;
+ p_bulletin->req_autoneg = params->speed.autoneg;
+ p_bulletin->req_adv_speed = params->speed.advertised_speeds;
+ p_bulletin->req_forced_speed = params->speed.forced_speed;
+ p_bulletin->req_autoneg_pause = params->pause.autoneg;
+ p_bulletin->req_forced_rx = params->pause.forced_rx;
+ p_bulletin->req_forced_tx = params->pause.forced_tx;
+ p_bulletin->req_loopback = params->loopback_mode;
+
+ p_bulletin->link_up = link->link_up;
+ p_bulletin->speed = link->speed;
+ p_bulletin->full_duplex = link->full_duplex;
+ p_bulletin->autoneg = link->an;
+ p_bulletin->autoneg_complete = link->an_complete;
+ p_bulletin->parallel_detection = link->parallel_detection;
+ p_bulletin->pfc_enabled = link->pfc_enabled;
+ p_bulletin->partner_adv_speed = link->partner_adv_speed;
+ p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
+ p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
+ p_bulletin->partner_adv_pause = link->partner_adv_pause;
+ p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
+
+ p_bulletin->capability_speed = p_caps->speed_capabilities;
+}
+
+static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u16 rel_vf_id)
+{
+ struct qed_mcp_link_capabilities caps;
+ struct qed_mcp_link_params params;
+ struct qed_mcp_link_state link;
+ struct qed_vf_info *vf = NULL;
+
+ vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!vf) {
+ DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n");
+ return -EINVAL;
+ }
+
+ if (vf->bulletin.p_virt)
+ memset(vf->bulletin.p_virt, 0, sizeof(*vf->bulletin.p_virt));
+
+ memset(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));
+
+ /* Get the link configuration back in bulletin so
+ * that when VFs are re-enabled they get the actual
+ * link configuration.
+ */
+ memcpy(&params, qed_mcp_get_link_params(p_hwfn), sizeof(params));
+ memcpy(&link, qed_mcp_get_link_state(p_hwfn), sizeof(link));
+ memcpy(&caps, qed_mcp_get_link_capabilities(p_hwfn), sizeof(caps));
+ qed_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);
+
+ /* Forget the VF's acquisition message */
+ memset(&vf->acquire, 0, sizeof(vf->acquire));
+
+ /* Disabling interrupts and resetting the permission table were done
+ * during vf-close; however, we could get here without going through
+ * vf-close.
+ */
+ /* Disable Interrupts for VF */
+ qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
+
+ /* Reset Permission table */
+ qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
+
+ vf->num_rxqs = 0;
+ vf->num_txqs = 0;
+ qed_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);
+
+ if (vf->b_init) {
+ vf->b_init = false;
+
+ if (IS_LEAD_HWFN(p_hwfn))
+ p_hwfn->cdev->p_iov_info->num_vfs--;
+ }
+
+ return 0;
+}
+
+static bool qed_iov_tlv_supported(u16 tlvtype)
+{
+ return CHANNEL_TLV_NONE < tlvtype && tlvtype < CHANNEL_TLV_MAX;
+}
+
+/* place a given tlv on the tlv buffer, continuing current tlv list */
+void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length)
+{
+ struct channel_tlv *tl = (struct channel_tlv *)*offset;
+
+ tl->type = type;
+ tl->length = length;
+
+ /* Offset should keep pointing to next TLV (the end of the last) */
+ *offset += length;
+
+ /* Return a pointer to the start of the added tlv */
+ return *offset - length;
+}
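+
+/* Typical usage (illustrative), mirroring qed_iov_prepare_resp() below:
+ *
+ *	offset = (u8 *)mbx->reply_virt;
+ *	qed_add_tlv(p_hwfn, &offset, type, length);
+ *	qed_add_tlv(p_hwfn, &offset, CHANNEL_TLV_LIST_END,
+ *		    sizeof(struct channel_list_end_tlv));
+ */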
+
+/* list the types and lengths of the tlvs on the buffer */
+void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list)
+{
+ u16 i = 1, total_length = 0;
+ struct channel_tlv *tlv;
+
+ do {
+ tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);
+
+ /* output tlv */
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "TLV number %d: type %d, length %d\n",
+ i, tlv->type, tlv->length);
+
+ if (tlv->type == CHANNEL_TLV_LIST_END)
+ return;
+
+ /* Validate entry - protect against malicious VFs */
+ if (!tlv->length) {
+ DP_NOTICE(p_hwfn, "TLV of length 0 found\n");
+ return;
+ }
+
+ total_length += tlv->length;
+
+ if (total_length >= sizeof(struct tlv_buffer_size)) {
+ DP_NOTICE(p_hwfn, "TLV ==> Buffer overflow\n");
+ return;
+ }
+
+ i++;
+ } while (1);
+}
+
+static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *p_vf,
+ u16 length, u8 status)
+{
+ struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct qed_dmae_params params;
+ u8 eng_vf_id;
+
+ mbx->reply_virt->default_resp.hdr.status = status;
+
+ qed_dp_tlv_list(p_hwfn, mbx->reply_virt);
+
+ eng_vf_id = p_vf->abs_vf_id;
+
+ memset(&params, 0, sizeof(struct qed_dmae_params));
+ params.flags = QED_DMAE_FLAG_VF_DST;
+ params.dst_vfid = eng_vf_id;
+
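+ /* The reply is copied in two DMAE transactions: everything past the
+ * first u64 goes first, then the leading u64 carrying the status
+ * header; the USDM "channel ready" write below then signals the VF.
+ */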
+ qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
+ mbx->req_virt->first_tlv.reply_address +
+ sizeof(u64),
+ (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
+ &params);
+
+ qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
+ mbx->req_virt->first_tlv.reply_address,
+ sizeof(u64) / 4, &params);
+
+ REG_WR(p_hwfn,
+ GTT_BAR0_MAP_REG_USDM_RAM +
+ USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
+}
+
+static u16 qed_iov_vport_to_tlv(struct qed_hwfn *p_hwfn,
+ enum qed_iov_vport_update_flag flag)
+{
+ switch (flag) {
+ case QED_IOV_VP_UPDATE_ACTIVATE:
+ return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
+ case QED_IOV_VP_UPDATE_VLAN_STRIP:
+ return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
+ case QED_IOV_VP_UPDATE_TX_SWITCH:
+ return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
+ case QED_IOV_VP_UPDATE_MCAST:
+ return CHANNEL_TLV_VPORT_UPDATE_MCAST;
+ case QED_IOV_VP_UPDATE_ACCEPT_PARAM:
+ return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
+ case QED_IOV_VP_UPDATE_RSS:
+ return CHANNEL_TLV_VPORT_UPDATE_RSS;
+ case QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
+ return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
+ case QED_IOV_VP_UPDATE_SGE_TPA:
+ return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
+ default:
+ return 0;
+ }
+}
+
+static u16 qed_iov_prep_vp_update_resp_tlvs(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf,
+ struct qed_iov_vf_mbx *p_mbx,
+ u8 status,
+ u16 tlvs_mask, u16 tlvs_accepted)
+{
+ struct pfvf_def_resp_tlv *resp;
+ u16 size, total_len, i;
+
+ memset(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
+ p_mbx->offset = (u8 *)p_mbx->reply_virt;
+ size = sizeof(struct pfvf_def_resp_tlv);
+ total_len = size;
+
+ qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
+
+ /* Prepare response for all extended tlvs if they are found by PF */
+ for (i = 0; i < QED_IOV_VP_UPDATE_MAX; i++) {
+ if (!(tlvs_mask & (1 << i)))
+ continue;
+
+ resp = qed_add_tlv(p_hwfn, &p_mbx->offset,
+ qed_iov_vport_to_tlv(p_hwfn, i), size);
+
+ if (tlvs_accepted & (1 << i))
+ resp->hdr.status = status;
+ else
+ resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d] - vport_update response: TLV %d, status %02x\n",
+ p_vf->relative_vf_id,
+ qed_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);
+
+ total_len += size;
+ }
+
+ qed_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ return total_len;
+}
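+
+/* Reply layout produced above (illustrative), for a tlvs_mask with two
+ * bits set: [VPORT_UPDATE def_resp][ext TLV][ext TLV][LIST_END]
+ */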
+
+static void qed_iov_prepare_resp(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf_info,
+ u16 type, u16 length, u8 status)
+{
+ struct qed_iov_vf_mbx *mbx = &vf_info->vf_mbx;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+
+ qed_add_tlv(p_hwfn, &mbx->offset, type, length);
+ qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ qed_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
+}
+
+struct qed_public_vf_info *qed_iov_get_public_vf_info(struct qed_hwfn *p_hwfn,
+ u16 relative_vf_id,
+ bool b_enabled_only)
+{
+ struct qed_vf_info *vf = NULL;
+
+ vf = qed_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
+ if (!vf)
+ return NULL;
+
+ return &vf->p_vf_info;
+}
+
+void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
+{
+ struct qed_public_vf_info *vf_info;
+
+ vf_info = qed_iov_get_public_vf_info(p_hwfn, vfid, false);
+
+ if (!vf_info)
+ return;
+
+ /* Clear the VF mac */
+ memset(vf_info->mac, 0, ETH_ALEN);
+}
+
+static void qed_iov_vf_cleanup(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf)
+{
+ u32 i;
+
+ p_vf->vf_bulletin = 0;
+ p_vf->vport_instance = 0;
+ p_vf->configured_features = 0;
+
+ /* If VF previously requested less resources, go back to default */
+ p_vf->num_rxqs = p_vf->num_sbs;
+ p_vf->num_txqs = p_vf->num_sbs;
+
+ p_vf->num_active_rxqs = 0;
+
+ for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++)
+ p_vf->vf_queues[i].rxq_active = 0;
+
+ memset(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
+ memset(&p_vf->acquire, 0, sizeof(p_vf->acquire));
+ qed_iov_clean_vf(p_hwfn, p_vf->relative_vf_id);
+}
+
+static u8 qed_iov_vf_mbx_acquire_resc(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *p_vf,
+ struct vf_pf_resc_request *p_req,
+ struct pf_vf_resc *p_resp)
+{
+ int i;
+
+ /* Queue related information */
+ p_resp->num_rxqs = p_vf->num_rxqs;
+ p_resp->num_txqs = p_vf->num_txqs;
+ p_resp->num_sbs = p_vf->num_sbs;
+
+ for (i = 0; i < p_resp->num_sbs; i++) {
+ p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
+ p_resp->hw_sbs[i].sb_qid = 0;
+ }
+
+ /* These fields are filled for backward compatibility.
+ * Unused by modern VFs.
+ */
+ for (i = 0; i < p_resp->num_rxqs; i++) {
+ qed_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
+ (u16 *)&p_resp->hw_qid[i]);
+ p_resp->cid[i] = p_vf->vf_queues[i].fw_cid;
+ }
+
+ /* Filter related information */
+ p_resp->num_mac_filters = min_t(u8, p_vf->num_mac_filters,
+ p_req->num_mac_filters);
+ p_resp->num_vlan_filters = min_t(u8, p_vf->num_vlan_filters,
+ p_req->num_vlan_filters);
+
+ /* This isn't really needed/enforced, but some legacy VFs might depend
+ * on the correct filling of this field.
+ */
+ p_resp->num_mc_filters = QED_MAX_MC_ADDRS;
+
+ /* Validate sufficient resources for VF */
+ if (p_resp->num_rxqs < p_req->num_rxqs ||
+ p_resp->num_txqs < p_req->num_txqs ||
+ p_resp->num_sbs < p_req->num_sbs ||
+ p_resp->num_mac_filters < p_req->num_mac_filters ||
+ p_resp->num_vlan_filters < p_req->num_vlan_filters ||
+ p_resp->num_mc_filters < p_req->num_mc_filters) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]\n",
+ p_vf->abs_vf_id,
+ p_req->num_rxqs,
+ p_resp->num_rxqs,
+ p_req->num_txqs,
+ p_resp->num_txqs,
+ p_req->num_sbs,
+ p_resp->num_sbs,
+ p_req->num_mac_filters,
+ p_resp->num_mac_filters,
+ p_req->num_vlan_filters,
+ p_resp->num_vlan_filters,
+ p_req->num_mc_filters, p_resp->num_mc_filters);
+ return PFVF_STATUS_NO_RESOURCE;
+ }
+
+ return PFVF_STATUS_SUCCESS;
+}
+
+static void qed_iov_vf_mbx_acquire_stats(struct qed_hwfn *p_hwfn,
+ struct pfvf_stats_info *p_stats)
+{
+ p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
+ offsetof(struct mstorm_vf_zone,
+ non_trigger.eth_queue_stat);
+ p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
+ p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
+ offsetof(struct ustorm_vf_zone,
+ non_trigger.eth_queue_stat);
+ p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
+ p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
+ offsetof(struct pstorm_vf_zone,
+ non_trigger.eth_queue_stat);
+ p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
+ p_stats->tstats.address = 0;
+ p_stats->tstats.len = 0;
+}
+
+static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
+ struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
+ struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
+ u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
+ struct pf_vf_resc *resc = &resp->resc;
+ int rc;
+
+ memset(resp, 0, sizeof(*resp));
+
+ /* Validate FW compatibility */
+ if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
+ DP_INFO(p_hwfn,
+ "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n",
+ vf->abs_vf_id,
+ req->vfdev_info.eth_fp_hsi_major,
+ req->vfdev_info.eth_fp_hsi_minor,
+ ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
+
+ /* Write the PF version so that VF would know which version
+ * is supported.
+ */
+ pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
+ pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
+
+ goto out;
+ }
+
+ /* On 100g PFs, prevent old VFs from loading */
+ if ((p_hwfn->cdev->num_hwfns > 1) &&
+ !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
+ DP_INFO(p_hwfn,
+ "VF[%d] is running an old driver that doesn't support 100g\n",
+ vf->abs_vf_id);
+ goto out;
+ }
+
+ /* Store the acquire message */
+ memcpy(&vf->acquire, req, sizeof(vf->acquire));
+
+ vf->opaque_fid = req->vfdev_info.opaque_fid;
+
+ vf->vf_bulletin = req->bulletin_addr;
+ vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
+ vf->bulletin.size : req->bulletin_size;
+
+ /* fill in pfdev info */
+ pfdev_info->chip_num = p_hwfn->cdev->chip_num;
+ pfdev_info->db_size = 0;
+ pfdev_info->indices_per_sb = PIS_PER_SB;
+
+ pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
+ PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
+ if (p_hwfn->cdev->num_hwfns > 1)
+ pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
+
+ qed_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);
+
+ memcpy(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, ETH_ALEN);
+
+ pfdev_info->fw_major = FW_MAJOR_VERSION;
+ pfdev_info->fw_minor = FW_MINOR_VERSION;
+ pfdev_info->fw_rev = FW_REVISION_VERSION;
+ pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
+ pfdev_info->minor_fp_hsi = min_t(u8,
+ ETH_HSI_VER_MINOR,
+ req->vfdev_info.eth_fp_hsi_minor);
+ pfdev_info->os_type = VFPF_ACQUIRE_OS_LINUX;
+ qed_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver, NULL);
+
+ pfdev_info->dev_type = p_hwfn->cdev->type;
+ pfdev_info->chip_rev = p_hwfn->cdev->chip_rev;
+
+ /* Fill resources available to VF; Make sure there are enough to
+ * satisfy the VF's request.
+ */
+ vfpf_status = qed_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
+ &req->resc_request, resc);
+ if (vfpf_status != PFVF_STATUS_SUCCESS)
+ goto out;
+
+ /* Start the VF in FW */
+ rc = qed_sp_vf_start(p_hwfn, vf);
+ if (rc) {
+ DP_NOTICE(p_hwfn, "Failed to start VF[%02x]\n", vf->abs_vf_id);
+ vfpf_status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
+ /* Fill agreed size of bulletin board in response */
+ resp->bulletin_size = vf->bulletin.size;
+ qed_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x, db_size=%d, idx_per_sb=%d, pf_cap=0x%llx\n"
+ "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d, n_vlans-%d\n",
+ vf->abs_vf_id,
+ resp->pfdev_info.chip_num,
+ resp->pfdev_info.db_size,
+ resp->pfdev_info.indices_per_sb,
+ resp->pfdev_info.capabilities,
+ resc->num_rxqs,
+ resc->num_txqs,
+ resc->num_sbs,
+ resc->num_mac_filters,
+ resc->num_vlan_filters);
+ vf->state = VF_ACQUIRED;
+
+ /* Prepare Response */
+out:
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
+ sizeof(struct pfvf_acquire_resp_tlv), vfpf_status);
+}
+
+static int __qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, bool val)
+{
+ struct qed_sp_vport_update_params params;
+ int rc;
+
+ if (val == p_vf->spoof_chk) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Spoofchk value[%d] is already configured\n", val);
+ return 0;
+ }
+
+ memset(&params, 0, sizeof(struct qed_sp_vport_update_params));
+ params.opaque_fid = p_vf->opaque_fid;
+ params.vport_id = p_vf->vport_id;
+ params.update_anti_spoofing_en_flg = 1;
+ params.anti_spoofing_en = val;
+
+ rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
+ if (!rc) {
+ p_vf->spoof_chk = val;
+ p_vf->req_spoofchk_val = p_vf->spoof_chk;
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Spoofchk val[%d] configured\n", val);
+ } else {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Spoofchk configuration[val:%d] failed for VF[%d]\n",
+ val, p_vf->relative_vf_id);
+ }
+
+ return rc;
+}
+
+static int qed_iov_reconfigure_unicast_vlan(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf)
+{
+ struct qed_filter_ucast filter;
+ int rc = 0;
+ int i;
+
+ memset(&filter, 0, sizeof(filter));
+ filter.is_rx_filter = 1;
+ filter.is_tx_filter = 1;
+ filter.vport_to_add_to = p_vf->vport_id;
+ filter.opcode = QED_FILTER_ADD;
+
+ /* Reconfigure vlans */
+ for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
+ if (!p_vf->shadow_config.vlans[i].used)
+ continue;
+
+ filter.type = QED_FILTER_VLAN;
+ filter.vlan = p_vf->shadow_config.vlans[i].vid;
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
+ filter.vlan, p_vf->relative_vf_id);
+ rc = qed_sp_eth_filter_ucast(p_hwfn,
+ p_vf->opaque_fid,
+ &filter,
+ QED_SPQ_MODE_CB, NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed to configure VLAN [%04x] to VF [%04x]\n",
+ filter.vlan, p_vf->relative_vf_id);
+ break;
+ }
+ }
+
+ return rc;
+}
+
+static int
+qed_iov_reconfigure_unicast_shadow(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, u64 events)
+{
+ int rc = 0;
+
+ if ((events & (1 << VLAN_ADDR_FORCED)) &&
+ !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
+ rc = qed_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
+
+ return rc;
+}
+
+static int qed_iov_configure_vport_forced(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, u64 events)
+{
+ int rc = 0;
+ struct qed_filter_ucast filter;
+
+ if (!p_vf->vport_instance)
+ return -EINVAL;
+
+ if (events & (1 << MAC_ADDR_FORCED)) {
+ /* Since there's no way [currently] of removing the MAC,
+ * we can always assume this means we need to force it.
+ */
+ memset(&filter, 0, sizeof(filter));
+ filter.type = QED_FILTER_MAC;
+ filter.opcode = QED_FILTER_REPLACE;
+ filter.is_rx_filter = 1;
+ filter.is_tx_filter = 1;
+ filter.vport_to_add_to = p_vf->vport_id;
+ ether_addr_copy(filter.mac, p_vf->bulletin.p_virt->mac);
+
+ rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
+ &filter, QED_SPQ_MODE_CB, NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "PF failed to configure MAC for VF\n");
+ return rc;
+ }
+
+ p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
+ }
+
+ if (events & (1 << VLAN_ADDR_FORCED)) {
+ struct qed_sp_vport_update_params vport_update;
+ u8 removal;
+ int i;
+
+ memset(&filter, 0, sizeof(filter));
+ filter.type = QED_FILTER_VLAN;
+ filter.is_rx_filter = 1;
+ filter.is_tx_filter = 1;
+ filter.vport_to_add_to = p_vf->vport_id;
+ filter.vlan = p_vf->bulletin.p_virt->pvid;
+ filter.opcode = filter.vlan ? QED_FILTER_REPLACE :
+ QED_FILTER_FLUSH;
+
+ /* Send the ramrod */
+ rc = qed_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
+ &filter, QED_SPQ_MODE_CB, NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "PF failed to configure VLAN for VF\n");
+ return rc;
+ }
+
+ /* Update the default-vlan & silent vlan stripping */
+ memset(&vport_update, 0, sizeof(vport_update));
+ vport_update.opaque_fid = p_vf->opaque_fid;
+ vport_update.vport_id = p_vf->vport_id;
+ vport_update.update_default_vlan_enable_flg = 1;
+ vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
+ vport_update.update_default_vlan_flg = 1;
+ vport_update.default_vlan = filter.vlan;
+
+ vport_update.update_inner_vlan_removal_flg = 1;
+ removal = filter.vlan ? 1
+ : p_vf->shadow_config.inner_vlan_removal;
+ vport_update.inner_vlan_removal_flg = removal;
+ vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
+ rc = qed_sp_vport_update(p_hwfn,
+ &vport_update,
+ QED_SPQ_MODE_EBLOCK, NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "PF failed to configure VF vport for vlan\n");
+ return rc;
+ }
+
+ /* Update all the Rx queues */
+ for (i = 0; i < QED_MAX_VF_CHAINS_PER_PF; i++) {
+ u16 qid;
+
+ if (!p_vf->vf_queues[i].rxq_active)
+ continue;
+
+ qid = p_vf->vf_queues[i].fw_rx_qid;
+
+ rc = qed_sp_eth_rx_queues_update(p_hwfn, qid,
+ 1, 0, 1,
+ QED_SPQ_MODE_EBLOCK,
+ NULL);
+ if (rc) {
+ DP_NOTICE(p_hwfn,
+ "Failed to send Rx update fo queue[0x%04x]\n",
+ qid);
+ return rc;
+ }
+ }
+
+ if (filter.vlan)
+ p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
+ else
+ p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
+ }
+
+ /* If forced features are terminated, we need to configure the shadow
+ * configuration back again.
+ */
+ if (events)
+ qed_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);
+
+ return rc;
+}
+
+static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ struct qed_sp_vport_start_params params = { 0 };
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct vfpf_vport_start_tlv *start;
+ u8 status = PFVF_STATUS_SUCCESS;
+ struct qed_vf_info *vf_info;
+ u64 *p_bitmap;
+ int sb_id;
+ int rc;
+
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->cdev,
+ "Failed to get VF info, invalid vfid [%d]\n",
+ vf->relative_vf_id);
+ return;
+ }
+
+ vf->state = VF_ENABLED;
+ start = &mbx->req_virt->start_vport;
+
+ /* Initialize Status block in CAU */
+ for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
+ if (!start->sb_addr[sb_id]) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d] did not fill the address of SB %d\n",
+ vf->relative_vf_id, sb_id);
+ break;
+ }
+
+ qed_int_cau_conf_sb(p_hwfn, p_ptt,
+ start->sb_addr[sb_id],
+ vf->igu_sbs[sb_id],
+ vf->abs_vf_id, 1);
+ }
+ qed_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
+
+ vf->mtu = start->mtu;
+ vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
+
+ /* Take into consideration configuration forced by hypervisor;
+ * if none is configured, use the supplied VF values [still fine for
+ * old VFs, since they passed '0' as padding].
+ */
+ p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
+ if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
+ u8 vf_req = start->only_untagged;
+
+ vf_info->bulletin.p_virt->default_only_untagged = vf_req;
+ *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
+ }
+
+ params.tpa_mode = start->tpa_mode;
+ params.remove_inner_vlan = start->inner_vlan_removal;
+ params.tx_switching = true;
+
+ params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
+ params.drop_ttl0 = false;
+ params.concrete_fid = vf->concrete_fid;
+ params.opaque_fid = vf->opaque_fid;
+ params.vport_id = vf->vport_id;
+ params.max_buffers_per_cqe = start->max_buffers_per_cqe;
+ params.mtu = vf->mtu;
+
+ rc = qed_sp_eth_vport_start(p_hwfn, &params);
+ if (rc != 0) {
+ DP_ERR(p_hwfn,
+ "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
+ status = PFVF_STATUS_FAILURE;
+ } else {
+ vf->vport_instance++;
+
+ /* Force configuration if needed on the newly opened vport */
+ qed_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
+
+ __qed_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
+ }
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
+ sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ u8 status = PFVF_STATUS_SUCCESS;
+ int rc;
+
+ vf->vport_instance--;
+ vf->spoof_chk = false;
+
+ rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
+ if (rc != 0) {
+ DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
+ rc);
+ status = PFVF_STATUS_FAILURE;
+ }
+
+ /* Forget the configuration on the vport */
+ vf->configured_features = 0;
+ memset(&vf->shadow_config, 0, sizeof(vf->shadow_config));
+
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
+ sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+static void qed_iov_vf_mbx_start_rxq_resp(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf, u8 status)
+{
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct pfvf_start_queue_resp_tlv *p_tlv;
+ struct vfpf_start_rxq_tlv *req;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+
+ p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
+ sizeof(*p_tlv));
+ qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* Update the TLV with the response */
+ if (status == PFVF_STATUS_SUCCESS) {
+ req = &mbx->req_virt->start_rxq;
+ p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
+ offsetof(struct mstorm_vf_zone,
+ non_trigger.eth_rx_queue_producers) +
+ sizeof(struct eth_rx_prod_data) * req->rx_qid;
+ }
+
+ qed_iov_send_response(p_hwfn, p_ptt, vf, sizeof(*p_tlv), status);
+}
+
+static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ struct qed_queue_start_common_params params;
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ u8 status = PFVF_STATUS_NO_RESOURCE;
+ struct vfpf_start_rxq_tlv *req;
+ int rc;
+
+ memset(&params, 0, sizeof(params));
+ req = &mbx->req_virt->start_rxq;
+
+ if (!qed_iov_validate_rxq(p_hwfn, vf, req->rx_qid) ||
+ !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
+ goto out;
+
+ params.queue_id = vf->vf_queues[req->rx_qid].fw_rx_qid;
+ params.vf_qid = req->rx_qid;
+ params.vport_id = vf->vport_id;
+ params.sb = req->hw_sb;
+ params.sb_idx = req->sb_index;
+
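+ /* The +0x10 below steers the stats id into the VF range: VF stats
+ * counters follow the 16 PF slots (VFs start at offset 16 relative
+ * to PF0).
+ */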
+ rc = qed_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
+ vf->vf_queues[req->rx_qid].fw_cid,
+ &params,
+ vf->abs_vf_id + 0x10,
+ req->bd_max_bytes,
+ req->rxq_addr,
+ req->cqe_pbl_addr, req->cqe_pbl_size);
+
+ if (rc) {
+ status = PFVF_STATUS_FAILURE;
+ } else {
+ status = PFVF_STATUS_SUCCESS;
+ vf->vf_queues[req->rx_qid].rxq_active = true;
+ vf->num_active_rxqs++;
+ }
+
+out:
+ qed_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status);
+}
+
+static void qed_iov_vf_mbx_start_txq_resp(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *p_vf, u8 status)
+{
+ struct qed_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+ struct pfvf_start_queue_resp_tlv *p_tlv;
+
+ mbx->offset = (u8 *)mbx->reply_virt;
+
+ p_tlv = qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
+ sizeof(*p_tlv));
+ qed_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+ sizeof(struct channel_list_end_tlv));
+
+ /* Update the TLV with the response */
+ if (status == PFVF_STATUS_SUCCESS) {
+ u16 qid = mbx->req_virt->start_txq.tx_qid;
+
+ p_tlv->offset = qed_db_addr(p_vf->vf_queues[qid].fw_cid,
+ DQ_DEMS_LEGACY);
+ }
+
+ qed_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_tlv), status);
+}
+
+static void qed_iov_vf_mbx_start_txq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ struct qed_queue_start_common_params params;
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ u8 status = PFVF_STATUS_NO_RESOURCE;
+ union qed_qm_pq_params pq_params;
+ struct vfpf_start_txq_tlv *req;
+ int rc;
+
+ /* Prepare the parameters which would choose the right PQ */
+ memset(&pq_params, 0, sizeof(pq_params));
+ pq_params.eth.is_vf = 1;
+ pq_params.eth.vf_id = vf->relative_vf_id;
+
+ memset(&params, 0, sizeof(params));
+ req = &mbx->req_virt->start_txq;
+
+ if (!qed_iov_validate_txq(p_hwfn, vf, req->tx_qid) ||
+ !qed_iov_validate_sb(p_hwfn, vf, req->hw_sb))
+ goto out;
+
+ params.queue_id = vf->vf_queues[req->tx_qid].fw_tx_qid;
+ params.vport_id = vf->vport_id;
+ params.sb = req->hw_sb;
+ params.sb_idx = req->sb_index;
+
+ rc = qed_sp_eth_txq_start_ramrod(p_hwfn,
+ vf->opaque_fid,
+ vf->vf_queues[req->tx_qid].fw_cid,
+ &params,
+ vf->abs_vf_id + 0x10,
+ req->pbl_addr,
+ req->pbl_size, &pq_params);
+
+ if (rc) {
+ status = PFVF_STATUS_FAILURE;
+ } else {
+ status = PFVF_STATUS_SUCCESS;
+ vf->vf_queues[req->tx_qid].txq_active = true;
+ }
+
+out:
+ qed_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, status);
+}
+
+static int qed_iov_vf_stop_rxqs(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *vf,
+ u16 rxq_id, u8 num_rxqs, bool cqe_completion)
+{
+ int rc = 0;
+ int qid;
+
+ if (rxq_id + num_rxqs > ARRAY_SIZE(vf->vf_queues))
+ return -EINVAL;
+
+ for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
+ if (vf->vf_queues[qid].rxq_active) {
+ rc = qed_sp_eth_rx_queue_stop(p_hwfn,
+ vf->vf_queues[qid].
+ fw_rx_qid, false,
+ cqe_completion);
+
+ if (rc)
+ return rc;
+
+ /* Only a previously active queue counts toward the total */
+ vf->num_active_rxqs--;
+ }
+ vf->vf_queues[qid].rxq_active = false;
+ }
+
+ return rc;
+}
+
+static int qed_iov_vf_stop_txqs(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *vf, u16 txq_id, u8 num_txqs)
+{
+ int rc = 0;
+ int qid;
+
+ if (txq_id + num_txqs > ARRAY_SIZE(vf->vf_queues))
+ return -EINVAL;
+
+ for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
+ if (vf->vf_queues[qid].txq_active) {
+ rc = qed_sp_eth_tx_queue_stop(p_hwfn,
+ vf->vf_queues[qid].
+ fw_tx_qid);
+
+ if (rc)
+ return rc;
+ }
+ vf->vf_queues[qid].txq_active = false;
+ }
+ return rc;
+}
+
+static void qed_iov_vf_mbx_stop_rxqs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ u8 status = PFVF_STATUS_SUCCESS;
+ struct vfpf_stop_rxqs_tlv *req;
+ int rc;
+
+ /* We give the option of starting from qid != 0; in this case we
+ * need to make sure that qid + num_qs doesn't exceed the actual
+ * number of queues that exist.
+ */
+ req = &mbx->req_virt->stop_rxqs;
+ rc = qed_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
+ req->num_rxqs, req->cqe_completion);
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
+ length, status);
+}
+
+static void qed_iov_vf_mbx_stop_txqs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ u8 status = PFVF_STATUS_SUCCESS;
+ struct vfpf_stop_txqs_tlv *req;
+ int rc;
+
+ /* We give the option of starting from qid != 0; in this case we
+ * need to make sure that qid + num_qs doesn't exceed the actual
+ * number of queues that exist.
+ */
+ req = &mbx->req_virt->stop_txqs;
+ rc = qed_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
+ length, status);
+}
+
+static void qed_iov_vf_mbx_update_rxqs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct vfpf_update_rxq_tlv *req;
+ u8 status = PFVF_STATUS_SUCCESS;
+ u8 complete_event_flg;
+ u8 complete_cqe_flg;
+ u16 qid;
+ int rc;
+ u8 i;
+
+ req = &mbx->req_virt->update_rxq;
+ complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
+ complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
+
+ for (i = 0; i < req->num_rxqs; i++) {
+ qid = req->rx_qid + i;
+
+ if (!vf->vf_queues[qid].rxq_active) {
+ DP_NOTICE(p_hwfn, "VF rx_qid = %d isn`t active!\n",
+ qid);
+ status = PFVF_STATUS_FAILURE;
+ break;
+ }
+
+ rc = qed_sp_eth_rx_queues_update(p_hwfn,
+ vf->vf_queues[qid].fw_rx_qid,
+ 1,
+ complete_cqe_flg,
+ complete_event_flg,
+ QED_SPQ_MODE_EBLOCK, NULL);
+
+ if (rc) {
+ status = PFVF_STATUS_FAILURE;
+ break;
+ }
+ }
+
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
+ length, status);
+}
+
+void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
+ void *p_tlvs_list, u16 req_type)
+{
+ struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
+ int len = 0;
+
+ do {
+ if (!p_tlv->length) {
+ DP_NOTICE(p_hwfn, "Zero length TLV found\n");
+ return NULL;
+ }
+
+ if (p_tlv->type == req_type) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Extended tlv type %d, length %d found\n",
+ p_tlv->type, p_tlv->length);
+ return p_tlv;
+ }
+
+ len += p_tlv->length;
+ p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
+
+ if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
+ DP_NOTICE(p_hwfn, "TLVs has overrun the buffer size\n");
+ return NULL;
+ }
+ } while (p_tlv->type != CHANNEL_TLV_LIST_END);
+
+ return NULL;
+}
+
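+/* Each qed_iov_vp_update_*_param() helper below follows the same pattern:
+ * find its extended TLV in the request, copy the fields into the ramrod
+ * parameters and set the matching bit in tlvs_mask.
+ */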
+static void
+qed_iov_vp_update_act_param(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_data,
+ struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_activate_tlv *p_act_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
+
+ p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_act_tlv)
+ return;
+
+ p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
+ p_data->vport_active_rx_flg = p_act_tlv->active_rx;
+ p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
+ p_data->vport_active_tx_flg = p_act_tlv->active_tx;
+ *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACTIVATE;
+}
+
+static void
+qed_iov_vp_update_vlan_param(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_data,
+ struct qed_vf_info *p_vf,
+ struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
+
+ p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_vlan_tlv)
+ return;
+
+ p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
+
+ /* Ignore the VF request if we're forcing a vlan */
+ if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
+ p_data->update_inner_vlan_removal_flg = 1;
+ p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
+ }
+
+ *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_VLAN_STRIP;
+}
+
+static void
+qed_iov_vp_update_tx_switch(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_data,
+ struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
+
+ p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
+ tlv);
+ if (!p_tx_switch_tlv)
+ return;
+
+ p_data->update_tx_switching_flg = 1;
+ p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
+ *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_TX_SWITCH;
+}
+
+static void
+qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_data,
+ struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
+
+ p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_mcast_tlv)
+ return;
+
+ p_data->update_approx_mcast_flg = 1;
+ memcpy(p_data->bins, p_mcast_tlv->bins,
+ sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
+ *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
+}
+
+static void
+qed_iov_vp_update_accept_flag(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_data,
+ struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct qed_filter_accept_flags *p_flags = &p_data->accept_flags;
+ struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
+
+ p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_accept_tlv)
+ return;
+
+ p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
+ p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
+ p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
+ p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
+ *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_PARAM;
+}
+
+static void
+qed_iov_vp_update_accept_any_vlan(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_data,
+ struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
+
+ p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
+ tlv);
+ if (!p_accept_any_vlan)
+ return;
+
+ p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
+ p_data->update_accept_any_vlan_flg =
+ p_accept_any_vlan->update_accept_any_vlan_flg;
+ *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
+}
+
+static void
+qed_iov_vp_update_rss_param(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *vf,
+ struct qed_sp_vport_update_params *p_data,
+ struct qed_rss_params *p_rss,
+ struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_rss_tlv *p_rss_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
+ u16 i, q_idx, max_q_idx;
+ u16 table_size;
+
+ p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+ if (!p_rss_tlv) {
+ p_data->rss_params = NULL;
+ return;
+ }
+
+ memset(p_rss, 0, sizeof(struct qed_rss_params));
+
+ p_rss->update_rss_config = !!(p_rss_tlv->update_rss_flags &
+ VFPF_UPDATE_RSS_CONFIG_FLAG);
+ p_rss->update_rss_capabilities = !!(p_rss_tlv->update_rss_flags &
+ VFPF_UPDATE_RSS_CAPS_FLAG);
+ p_rss->update_rss_ind_table = !!(p_rss_tlv->update_rss_flags &
+ VFPF_UPDATE_RSS_IND_TABLE_FLAG);
+ p_rss->update_rss_key = !!(p_rss_tlv->update_rss_flags &
+ VFPF_UPDATE_RSS_KEY_FLAG);
+
+ p_rss->rss_enable = p_rss_tlv->rss_enable;
+ p_rss->rss_eng_id = vf->relative_vf_id + 1;
+ p_rss->rss_caps = p_rss_tlv->rss_caps;
+ p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
+ memcpy(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table,
+ sizeof(p_rss->rss_ind_table));
+ memcpy(p_rss->rss_key, p_rss_tlv->rss_key, sizeof(p_rss->rss_key));
+
+ table_size = min_t(u16, ARRAY_SIZE(p_rss->rss_ind_table),
+ (1 << p_rss_tlv->rss_table_size_log));
+
+ max_q_idx = ARRAY_SIZE(vf->vf_queues);
+
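+ /* Translate VF-relative queue ids in the indirection table to FW rx
+ * queue ids; out-of-range or inactive entries fall back to queue 0.
+ */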
+ for (i = 0; i < table_size; i++) {
+ u16 index = vf->vf_queues[0].fw_rx_qid;
+
+ q_idx = p_rss->rss_ind_table[i];
+ if (q_idx >= max_q_idx)
+ DP_NOTICE(p_hwfn,
+ "rss_ind_table[%d] = %d, rxq is out of range\n",
+ i, q_idx);
+ else if (!vf->vf_queues[q_idx].rxq_active)
+ DP_NOTICE(p_hwfn,
+ "rss_ind_table[%d] = %d, rxq is not active\n",
+ i, q_idx);
+ else
+ index = vf->vf_queues[q_idx].fw_rx_qid;
+ p_rss->rss_ind_table[i] = index;
+ }
+
+ p_data->rss_params = p_rss;
+ *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_RSS;
+}
+
+static void
+qed_iov_vp_update_sge_tpa_param(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *vf,
+ struct qed_sp_vport_update_params *p_data,
+ struct qed_sge_tpa_params *p_sge_tpa,
+ struct qed_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+{
+ struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
+ u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
+
+ p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+
+ if (!p_sge_tpa_tlv) {
+ p_data->sge_tpa_params = NULL;
+ return;
+ }
+
+ memset(p_sge_tpa, 0, sizeof(struct qed_sge_tpa_params));
+
+ p_sge_tpa->update_tpa_en_flg =
+ !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
+ p_sge_tpa->update_tpa_param_flg =
+ !!(p_sge_tpa_tlv->update_sge_tpa_flags &
+ VFPF_UPDATE_TPA_PARAM_FLAG);
+
+ p_sge_tpa->tpa_ipv4_en_flg =
+ !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
+ p_sge_tpa->tpa_ipv6_en_flg =
+ !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
+ p_sge_tpa->tpa_pkt_split_flg =
+ !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
+ p_sge_tpa->tpa_hdr_data_split_flg =
+ !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
+ p_sge_tpa->tpa_gro_consistent_flg =
+ !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
+
+ p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
+ p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
+ p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
+ p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
+ p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;
+
+ p_data->sge_tpa_params = p_sge_tpa;
+
+ *tlvs_mask |= 1 << QED_IOV_VP_UPDATE_SGE_TPA;
+}
+
+static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ struct qed_sp_vport_update_params params;
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct qed_sge_tpa_params sge_tpa_params;
+ struct qed_rss_params rss_params;
+ u8 status = PFVF_STATUS_SUCCESS;
+ u16 tlvs_mask = 0;
+ u16 length;
+ int rc;
+
+ /* Validate that the VF has an active vport to update */
+ if (!vf->vport_instance) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "No VPORT instance available for VF[%d], failing vport update\n",
+ vf->abs_vf_id);
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
+ memset(&params, 0, sizeof(params));
+ params.opaque_fid = vf->opaque_fid;
+ params.vport_id = vf->vport_id;
+ params.rss_params = NULL;
+
+ /* Search for extended tlvs list and update values
+ * from VF in struct qed_sp_vport_update_params.
+ */
+ qed_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
+ qed_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
+ qed_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
+ qed_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
+ qed_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
+ qed_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params,
+ mbx, &tlvs_mask);
+ qed_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
+ qed_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
+ &sge_tpa_params, mbx, &tlvs_mask);
+
+ /* Just log a message if no extended TLV was found in the buffer.
+ * Once every vport-update ramrod feature is requested by the VF as
+ * an extended TLV, a missing TLV can become an error in the response.
+ */
+ if (!tlvs_mask) {
+ DP_NOTICE(p_hwfn,
+ "No feature tlvs found for vport update\n");
+ status = PFVF_STATUS_NOT_SUPPORTED;
+ goto out;
+ }
+
+ rc = qed_sp_vport_update(p_hwfn, &params, QED_SPQ_MODE_EBLOCK, NULL);
+
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+
+out:
+ length = qed_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
+ tlvs_mask, tlvs_mask);
+ qed_iov_send_response(p_hwfn, p_ptt, vf, length, status);
+}
+
+static int qed_iov_vf_update_vlan_shadow(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf,
+ struct qed_filter_ucast *p_params)
+{
+ int i;
+
+ /* First remove entries and then add new ones */
+ if (p_params->opcode == QED_FILTER_REMOVE) {
+ for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
+ if (p_vf->shadow_config.vlans[i].used &&
+ p_vf->shadow_config.vlans[i].vid ==
+ p_params->vlan) {
+ p_vf->shadow_config.vlans[i].used = false;
+ break;
+ }
+ if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF [%d] - Tries to remove a non-existing vlan\n",
+ p_vf->relative_vf_id);
+ return -EINVAL;
+ }
+ } else if (p_params->opcode == QED_FILTER_REPLACE ||
+ p_params->opcode == QED_FILTER_FLUSH) {
+ for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
+ p_vf->shadow_config.vlans[i].used = false;
+ }
+
+ /* In forced mode, we're willing to remove entries - but we don't add
+ * new ones.
+ */
+ if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
+ return 0;
+
+ if (p_params->opcode == QED_FILTER_ADD ||
+ p_params->opcode == QED_FILTER_REPLACE) {
+ for (i = 0; i < QED_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
+ if (p_vf->shadow_config.vlans[i].used)
+ continue;
+
+ p_vf->shadow_config.vlans[i].used = true;
+ p_vf->shadow_config.vlans[i].vid = p_params->vlan;
+ break;
+ }
+
+ if (i == QED_ETH_VF_NUM_VLAN_FILTERS + 1) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF [%d] - Tries to configure more than %d vlan filters\n",
+ p_vf->relative_vf_id,
+ QED_ETH_VF_NUM_VLAN_FILTERS + 1);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf,
+ struct qed_filter_ucast *p_params)
+{
+ int i;
+
+ /* If we're in forced-mode, we don't allow any change */
+ if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
+ return 0;
+
+ /* First remove entries and then add new ones */
+ if (p_params->opcode == QED_FILTER_REMOVE) {
+ for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
+ if (ether_addr_equal(p_vf->shadow_config.macs[i],
+ p_params->mac)) {
+ memset(p_vf->shadow_config.macs[i], 0,
+ ETH_ALEN);
+ break;
+ }
+ }
+
+ if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "MAC isn't configured\n");
+ return -EINVAL;
+ }
+ } else if (p_params->opcode == QED_FILTER_REPLACE ||
+ p_params->opcode == QED_FILTER_FLUSH) {
+ for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++)
+ memset(p_vf->shadow_config.macs[i], 0, ETH_ALEN);
+ }
+
+ /* List the new MAC address */
+ if (p_params->opcode != QED_FILTER_ADD &&
+ p_params->opcode != QED_FILTER_REPLACE)
+ return 0;
+
+ for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
+ if (is_zero_ether_addr(p_vf->shadow_config.macs[i])) {
+ ether_addr_copy(p_vf->shadow_config.macs[i],
+ p_params->mac);
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Added MAC at %d entry in shadow\n", i);
+ break;
+ }
+ }
+
+ if (i == QED_ETH_VF_NUM_MAC_FILTERS) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV, "No available place for MAC\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+qed_iov_vf_update_unicast_shadow(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf,
+ struct qed_filter_ucast *p_params)
+{
+ int rc = 0;
+
+ if (p_params->type == QED_FILTER_MAC) {
+ rc = qed_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
+ if (rc)
+ return rc;
+ }
+
+ if (p_params->type == QED_FILTER_VLAN)
+ rc = qed_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
+
+ return rc;
+}
+
+int qed_iov_chk_ucast(struct qed_hwfn *hwfn,
+ int vfid, struct qed_filter_ucast *params)
+{
+ struct qed_public_vf_info *vf;
+
+ vf = qed_iov_get_public_vf_info(hwfn, vfid, true);
+ if (!vf)
+ return -EINVAL;
+
+ /* No real decision to make; store the configured MAC */
+ if (params->type == QED_FILTER_MAC ||
+ params->type == QED_FILTER_MAC_VLAN)
+ ether_addr_copy(vf->mac, params->mac);
+
+ return 0;
+}
+
+static void qed_iov_vf_mbx_ucast_filter(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ struct qed_bulletin_content *p_bulletin = vf->bulletin.p_virt;
+ struct qed_iov_vf_mbx *mbx = &vf->vf_mbx;
+ struct vfpf_ucast_filter_tlv *req;
+ u8 status = PFVF_STATUS_SUCCESS;
+ struct qed_filter_ucast params;
+ int rc;
+
+ /* Prepare the unicast filter params */
+ memset(&params, 0, sizeof(struct qed_filter_ucast));
+ req = &mbx->req_virt->ucast_filter;
+ params.opcode = (enum qed_filter_opcode)req->opcode;
+ params.type = (enum qed_filter_ucast_type)req->type;
+
+ params.is_rx_filter = 1;
+ params.is_tx_filter = 1;
+ params.vport_to_remove_from = vf->vport_id;
+ params.vport_to_add_to = vf->vport_id;
+ memcpy(params.mac, req->mac, ETH_ALEN);
+ params.vlan = req->vlan;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
+ vf->abs_vf_id, params.opcode, params.type,
+ params.is_rx_filter ? "RX" : "",
+ params.is_tx_filter ? "TX" : "",
+ params.vport_to_add_to,
+ params.mac[0], params.mac[1],
+ params.mac[2], params.mac[3],
+ params.mac[4], params.mac[5], params.vlan);
+
+ if (!vf->vport_instance) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
+ vf->abs_vf_id);
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
+ /* Update shadow copy of the VF configuration */
+ if (qed_iov_vf_update_unicast_shadow(p_hwfn, vf, &params)) {
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
+	/* Determine whether the unicast filtering is acceptable to the PF */
+ if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
+ (params.type == QED_FILTER_VLAN ||
+ params.type == QED_FILTER_MAC_VLAN)) {
+ /* Once VLAN is forced or PVID is set, do not allow
+ * to add/replace any further VLANs.
+ */
+ if (params.opcode == QED_FILTER_ADD ||
+ params.opcode == QED_FILTER_REPLACE)
+ status = PFVF_STATUS_FORCED;
+ goto out;
+ }
+
+ if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
+ (params.type == QED_FILTER_MAC ||
+ params.type == QED_FILTER_MAC_VLAN)) {
+ if (!ether_addr_equal(p_bulletin->mac, params.mac) ||
+ (params.opcode != QED_FILTER_ADD &&
+ params.opcode != QED_FILTER_REPLACE))
+ status = PFVF_STATUS_FORCED;
+ goto out;
+ }
+
+ rc = qed_iov_chk_ucast(p_hwfn, vf->relative_vf_id, &params);
+ if (rc) {
+ status = PFVF_STATUS_FAILURE;
+ goto out;
+ }
+
+ rc = qed_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
+ QED_SPQ_MODE_CB, NULL);
+ if (rc)
+ status = PFVF_STATUS_FAILURE;
+
+out:
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
+ sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+static void qed_iov_vf_mbx_int_cleanup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *vf)
+{
+ int i;
+
+ /* Reset the SBs */
+ for (i = 0; i < vf->num_sbs; i++)
+ qed_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
+ vf->igu_sbs[i],
+ vf->opaque_fid, false);
+
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
+ sizeof(struct pfvf_def_resp_tlv),
+ PFVF_STATUS_SUCCESS);
+}
+
+static void qed_iov_vf_mbx_close(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, struct qed_vf_info *vf)
+{
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ u8 status = PFVF_STATUS_SUCCESS;
+
+ /* Disable Interrupts for VF */
+ qed_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
+
+ /* Reset Permission table */
+ qed_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
+
+ qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
+ length, status);
+}
+
+static void qed_iov_vf_mbx_release(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_vf_info *p_vf)
+{
+ u16 length = sizeof(struct pfvf_def_resp_tlv);
+ u8 status = PFVF_STATUS_SUCCESS;
+ int rc = 0;
+
+ qed_iov_vf_cleanup(p_hwfn, p_vf);
+
+ if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
+ /* Stopping the VF */
+ rc = qed_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
+ p_vf->opaque_fid);
+
+ if (rc) {
+ DP_ERR(p_hwfn, "qed_sp_vf_stop returned error %d\n",
+ rc);
+ status = PFVF_STATUS_FAILURE;
+ }
+
+ p_vf->state = VF_STOPPED;
+ }
+
+ qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
+ length, status);
+}
+
+static int
+qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
+{
+ int cnt;
+ u32 val;
+
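+	/* Pretend to the VF's concrete FID so the DORQ usage counter read
+	 * below reflects that VF's outstanding doorbells; poll up to
+	 * 50 * 20msec for them to drain.
+	 */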
+ qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid);
+
+ for (cnt = 0; cnt < 50; cnt++) {
+ val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
+ if (!val)
+ break;
+ msleep(20);
+ }
+ qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+
+ if (cnt == 50) {
+ DP_ERR(p_hwfn,
+ "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
+ p_vf->abs_vf_id, val);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
+{
+ u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
+ int i, cnt;
+
+ /* Read initial consumers & producers */
+ for (i = 0; i < MAX_NUM_VOQS; i++) {
+ u32 prod;
+
+ cons[i] = qed_rd(p_hwfn, p_ptt,
+ PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
+ i * 0x40);
+ prod = qed_rd(p_hwfn, p_ptt,
+ PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
+ i * 0x40);
+ distance[i] = prod - cons[i];
+ }
+
+	/* Wait for each VOQ's consumer to catch up with the producer
+	 * snapshot taken above, i.e. for everything queued before the FLR
+	 * to drain.
+	 */
+ i = 0;
+ for (cnt = 0; cnt < 50; cnt++) {
+ for (; i < MAX_NUM_VOQS; i++) {
+ u32 tmp;
+
+ tmp = qed_rd(p_hwfn, p_ptt,
+ PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
+ i * 0x40);
+ if (distance[i] > tmp - cons[i])
+ break;
+ }
+
+ if (i == MAX_NUM_VOQS)
+ break;
+
+ msleep(20);
+ }
+
+ if (cnt == 50) {
+ DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
+ p_vf->abs_vf_id, i);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int qed_iov_vf_flr_poll(struct qed_hwfn *p_hwfn,
+ struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
+{
+ int rc;
+
+ rc = qed_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
+ if (rc)
+ return rc;
+
+ rc = qed_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int
+qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 rel_vf_id, u32 *ack_vfs)
+{
+ struct qed_vf_info *p_vf;
+ int rc = 0;
+
+ p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+ if (!p_vf)
+ return 0;
+
+ if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
+ (1ULL << (rel_vf_id % 64))) {
+ u16 vfid = p_vf->abs_vf_id;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d] - Handling FLR\n", vfid);
+
+ qed_iov_vf_cleanup(p_hwfn, p_vf);
+
+ /* If VF isn't active, no need for anything but SW */
+ if (!p_vf->b_init)
+ goto cleanup;
+
+ rc = qed_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
+ if (rc)
+ goto cleanup;
+
+ rc = qed_final_cleanup(p_hwfn, p_ptt, vfid, true);
+ if (rc) {
+ DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n", vfid);
+ return rc;
+ }
+
+ /* VF_STOPPED has to be set only after final cleanup
+ * but prior to re-enabling the VF.
+ */
+ p_vf->state = VF_STOPPED;
+
+ rc = qed_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
+ if (rc) {
+ DP_ERR(p_hwfn, "Failed to re-enable VF[%d] acces\n",
+ vfid);
+ return rc;
+ }
+cleanup:
+ /* Mark VF for ack and clean pending state */
+ if (p_vf->state == VF_RESET)
+ p_vf->state = VF_STOPPED;
+ ack_vfs[vfid / 32] |= (1 << (vfid % 32));
+ p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
+ ~(1ULL << (rel_vf_id % 64));
+ p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
+ ~(1ULL << (rel_vf_id % 64));
+ }
+
+ return rc;
+}
+
+int qed_iov_vf_flr_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+ u32 ack_vfs[VF_MAX_STATIC / 32];
+ int rc = 0;
+ u16 i;
+
+ memset(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
+
+ /* Since BRB <-> PRS interface can't be tested as part of the flr
+ * polling due to HW limitations, simply sleep a bit. And since
+ * there's no need to wait per-vf, do it before looping.
+ */
+ msleep(100);
+
+ for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++)
+ qed_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
+
+ rc = qed_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
+ return rc;
+}
+
+int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *p_disabled_vfs)
+{
+ u16 i, found = 0;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV, "Marking FLR-ed VFs\n");
+ for (i = 0; i < (VF_MAX_STATIC / 32); i++)
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "[%08x,...,%08x]: %08x\n",
+ i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
+
+ if (!p_hwfn->cdev->p_iov_info) {
+ DP_NOTICE(p_hwfn, "VF flr but no IOV\n");
+ return 0;
+ }
+
+ /* Mark VFs */
+ for (i = 0; i < p_hwfn->cdev->p_iov_info->total_vfs; i++) {
+ struct qed_vf_info *p_vf;
+ u8 vfid;
+
+ p_vf = qed_iov_get_vf_info(p_hwfn, i, false);
+ if (!p_vf)
+ continue;
+
+ vfid = p_vf->abs_vf_id;
+ if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
+ u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
+ u16 rel_vf_id = p_vf->relative_vf_id;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%d] [rel %d] got FLR-ed\n",
+ vfid, rel_vf_id);
+
+ p_vf->state = VF_RESET;
+
+			/* No need to lock here, since pending_flr should only
+			 * change here and before ACKing the MFW. Since the MFW
+			 * will not trigger an additional attention for VF FLR
+			 * until we ACK, we're safe.
+			 */
+ p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
+ found = 1;
+ }
+ }
+
+ return found;
+}
+
+static void qed_iov_get_link(struct qed_hwfn *p_hwfn,
+ u16 vfid,
+ struct qed_mcp_link_params *p_params,
+ struct qed_mcp_link_state *p_link,
+ struct qed_mcp_link_capabilities *p_caps)
+{
+ struct qed_vf_info *p_vf = qed_iov_get_vf_info(p_hwfn,
+ vfid,
+ false);
+ struct qed_bulletin_content *p_bulletin;
+
+ if (!p_vf)
+ return;
+
+ p_bulletin = p_vf->bulletin.p_virt;
+
+ if (p_params)
+ __qed_vf_get_link_params(p_hwfn, p_params, p_bulletin);
+ if (p_link)
+ __qed_vf_get_link_state(p_hwfn, p_link, p_bulletin);
+ if (p_caps)
+ __qed_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
+}
+
+static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, int vfid)
+{
+ struct qed_iov_vf_mbx *mbx;
+ struct qed_vf_info *p_vf;
+
+ p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ if (!p_vf)
+ return;
+
+ mbx = &p_vf->vf_mbx;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF[%02x]: Processing mailbox message\n", p_vf->abs_vf_id);
+
+ mbx->first_tlv = mbx->req_virt->first_tlv;
+
+ /* check if tlv type is known */
+ if (qed_iov_tlv_supported(mbx->first_tlv.tl.type)) {
+ switch (mbx->first_tlv.tl.type) {
+ case CHANNEL_TLV_ACQUIRE:
+ qed_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_VPORT_START:
+ qed_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_VPORT_TEARDOWN:
+ qed_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_START_RXQ:
+ qed_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_START_TXQ:
+ qed_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_STOP_RXQS:
+ qed_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_STOP_TXQS:
+ qed_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_UPDATE_RXQ:
+ qed_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_VPORT_UPDATE:
+ qed_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_UCAST_FILTER:
+ qed_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_CLOSE:
+ qed_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_INT_CLEANUP:
+ qed_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
+ break;
+ case CHANNEL_TLV_RELEASE:
+ qed_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
+ break;
+ }
+ } else {
+ /* unknown TLV - this may belong to a VF driver from the future
+ * - a version written after this PF driver was written, which
+ * supports features unknown as of yet. Too bad since we don't
+ * support them. Or this may be because someone wrote a crappy
+ * VF driver and is sending garbage over the channel.
+ */
+ DP_NOTICE(p_hwfn,
+ "VF[%02x]: unknown TLV. type %04x length %04x padding %08x reply address %llu\n",
+ p_vf->abs_vf_id,
+ mbx->first_tlv.tl.type,
+ mbx->first_tlv.tl.length,
+ mbx->first_tlv.padding, mbx->first_tlv.reply_address);
+
+ /* Try replying in case reply address matches the acquisition's
+ * posted address.
+ */
+ if (p_vf->acquire.first_tlv.reply_address &&
+ (mbx->first_tlv.reply_address ==
+ p_vf->acquire.first_tlv.reply_address)) {
+ qed_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
+ mbx->first_tlv.tl.type,
+ sizeof(struct pfvf_def_resp_tlv),
+ PFVF_STATUS_NOT_SUPPORTED);
+ } else {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF[%02x]: Can't respond to TLV - no valid reply address\n",
+ p_vf->abs_vf_id);
+ }
+ }
+}
+
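+/* Mark a VF (by relative id) as having a pending message; one bit per VF,
+ * 64 VFs per u64 word. The IOV workqueue later collects and clears these
+ * via qed_iov_pf_get_and_clear_pending_events().
+ */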
+void qed_iov_pf_add_pending_events(struct qed_hwfn *p_hwfn, u8 vfid)
+{
+ u64 add_bit = 1ULL << (vfid % 64);
+
+ p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
+}
+
+static void qed_iov_pf_get_and_clear_pending_events(struct qed_hwfn *p_hwfn,
+ u64 *events)
+{
+ u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;
+
+ memcpy(events, p_pending_events, sizeof(u64) * QED_VF_ARRAY_LENGTH);
+ memset(p_pending_events, 0, sizeof(u64) * QED_VF_ARRAY_LENGTH);
+}
+
+static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
+ u16 abs_vfid, struct regpair *vf_msg)
+{
+ u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf;
+ struct qed_vf_info *p_vf;
+
+ if (!qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min)) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "Got a message from VF [abs 0x%08x] that cannot be handled by PF\n",
+ abs_vfid);
+ return 0;
+ }
+ p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
+
+ /* List the physical address of the request so that handler
+ * could later on copy the message from it.
+ */
+ p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
+
+ /* Mark the event and schedule the workqueue */
+ qed_iov_pf_add_pending_events(p_hwfn, p_vf->relative_vf_id);
+ qed_schedule_iov(p_hwfn, QED_IOV_WQ_MSG_FLAG);
+
+ return 0;
+}
+
+int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
+ u8 opcode, __le16 echo, union event_ring_data *data)
+{
+ switch (opcode) {
+ case COMMON_EVENT_VF_PF_CHANNEL:
+ return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
+ &data->vf_pf_channel.msg_addr);
+ default:
+ DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
+ opcode);
+ return -EINVAL;
+ }
+}
+
+u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
+{
+ struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
+ u16 i;
+
+ if (!p_iov)
+ goto out;
+
+ for (i = rel_vf_id; i < p_iov->total_vfs; i++)
+		if (qed_iov_is_valid_vfid(p_hwfn, i, true))
+ return i;
+
+out:
+ return MAX_NUM_VFS;
+}
+
+static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
+ int vfid)
+{
+ struct qed_dmae_params params;
+ struct qed_vf_info *vf_info;
+
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ if (!vf_info)
+ return -EINVAL;
+
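+	/* DMA the request out of the VF's memory (QED_DMAE_FLAG_VF_SRC with
+	 * src_vfid) into the PF-side mailbox buffer for this VF; the DMAE
+	 * length is given in dwords.
+	 */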
+ memset(&params, 0, sizeof(struct qed_dmae_params));
+ params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST;
+ params.src_vfid = vf_info->abs_vf_id;
+
+ if (qed_dmae_host2host(p_hwfn, ptt,
+ vf_info->vf_mbx.pending_req,
+ vf_info->vf_mbx.req_phys,
+ sizeof(union vfpf_tlvs) / 4, &params)) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Failed to copy message from VF 0x%02x\n", vfid);
+
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void qed_iov_bulletin_set_forced_mac(struct qed_hwfn *p_hwfn,
+ u8 *mac, int vfid)
+{
+ struct qed_vf_info *vf_info;
+ u64 feature;
+
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->cdev,
+ "Can not set forced MAC, invalid vfid [%d]\n", vfid);
+ return;
+ }
+
+ feature = 1 << MAC_ADDR_FORCED;
+ memcpy(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
+
+ vf_info->bulletin.p_virt->valid_bitmap |= feature;
+ /* Forced MAC will disable MAC_ADDR */
+ vf_info->bulletin.p_virt->valid_bitmap &=
+ ~(1 << VFPF_BULLETIN_MAC_ADDR);
+
+ qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
+}
+
+void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
+ u16 pvid, int vfid)
+{
+ struct qed_vf_info *vf_info;
+ u64 feature;
+
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ if (!vf_info) {
+ DP_NOTICE(p_hwfn->cdev,
+ "Can not set forced MAC, invalid vfid [%d]\n", vfid);
+ return;
+ }
+
+ feature = 1 << VLAN_ADDR_FORCED;
+ vf_info->bulletin.p_virt->pvid = pvid;
+ if (pvid)
+ vf_info->bulletin.p_virt->valid_bitmap |= feature;
+ else
+ vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
+
+ qed_iov_configure_vport_forced(p_hwfn, vf_info, feature);
+}
+
+static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
+{
+ struct qed_vf_info *p_vf_info;
+
+ p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ if (!p_vf_info)
+ return false;
+
+ return !!p_vf_info->vport_instance;
+}
+
+bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
+{
+ struct qed_vf_info *p_vf_info;
+
+ p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ if (!p_vf_info)
+ return true;
+
+ return p_vf_info->state == VF_STOPPED;
+}
+
+static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
+{
+ struct qed_vf_info *vf_info;
+
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ if (!vf_info)
+ return false;
+
+ return vf_info->spoof_chk;
+}
+
+int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
+{
+ struct qed_vf_info *vf;
+ int rc = -EINVAL;
+
+ if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
+ DP_NOTICE(p_hwfn,
+ "SR-IOV sanity check failed, can't set spoofchk\n");
+ goto out;
+ }
+
+ vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ if (!vf)
+ goto out;
+
+ if (!qed_iov_vf_has_vport_instance(p_hwfn, vfid)) {
+ /* After VF VPORT start PF will configure spoof check */
+ vf->req_spoofchk_val = val;
+ rc = 0;
+ goto out;
+ }
+
+ rc = __qed_iov_spoofchk_set(p_hwfn, vf, val);
+
+out:
+ return rc;
+}
+
+static u8 *qed_iov_bulletin_get_forced_mac(struct qed_hwfn *p_hwfn,
+ u16 rel_vf_id)
+{
+ struct qed_vf_info *p_vf;
+
+ p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf || !p_vf->bulletin.p_virt)
+ return NULL;
+
+ if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
+ return NULL;
+
+ return p_vf->bulletin.p_virt->mac;
+}
+
+u16 qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
+{
+ struct qed_vf_info *p_vf;
+
+ p_vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+ if (!p_vf || !p_vf->bulletin.p_virt)
+ return 0;
+
+ if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
+ return 0;
+
+ return p_vf->bulletin.p_virt->pvid;
+}
+
+static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, int vfid, int val)
+{
+ struct qed_vf_info *vf;
+ u8 abs_vp_id = 0;
+ int rc;
+
+ vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+ if (!vf)
+ return -EINVAL;
+
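+	/* Translate the VF's relative vport into its absolute index, then
+	 * program the per-vport rate limiter with the requested value.
+	 */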
+ rc = qed_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
+ if (rc)
+ return rc;
+
+ return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
+}
+
+int qed_iov_configure_min_tx_rate(struct qed_dev *cdev, int vfid, u32 rate)
+{
+ struct qed_vf_info *vf;
+ u8 vport_id;
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
+ DP_NOTICE(p_hwfn,
+ "SR-IOV sanity check failed, can't set min rate\n");
+ return -EINVAL;
+ }
+ }
+
+	vf = qed_iov_get_vf_info(QED_LEADING_HWFN(cdev), (u16)vfid, true);
+	if (!vf)
+		return -EINVAL;
+
+	vport_id = vf->vport_id;
+
+ return qed_configure_vport_wfq(cdev, vport_id, rate);
+}
+
+static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
+{
+ struct qed_wfq_data *vf_vp_wfq;
+ struct qed_vf_info *vf_info;
+
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ if (!vf_info)
+ return 0;
+
+ vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];
+
+ if (vf_vp_wfq->configured)
+ return vf_vp_wfq->min_speed;
+ else
+ return 0;
+}
+
+/**
+ * qed_schedule_iov - schedules IOV task for VF and PF
+ * @hwfn: hardware function pointer
+ * @flag: IOV flag for VF/PF
+ */
+void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
+{
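+	/* The barriers pair the atomic set_bit() with the work item that
+	 * reads iov_task_flags, making the flag visible before the work runs.
+	 */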
+ smp_mb__before_atomic();
+ set_bit(flag, &hwfn->iov_task_flags);
+ smp_mb__after_atomic();
+ DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
+ queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
+}
+
+void qed_vf_start_iov_wq(struct qed_dev *cdev)
+{
+ int i;
+
+ for_each_hwfn(cdev, i)
+ queue_delayed_work(cdev->hwfns[i].iov_wq,
+ &cdev->hwfns[i].iov_task, 0);
+}
+
+int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
+{
+ int i, j;
+
+ for_each_hwfn(cdev, i)
+ if (cdev->hwfns[i].iov_wq)
+ flush_workqueue(cdev->hwfns[i].iov_wq);
+
+ /* Mark VFs for disablement */
+ qed_iov_set_vfs_to_disable(cdev, true);
+
+ if (cdev->p_iov_info && cdev->p_iov_info->num_vfs && pci_enabled)
+ pci_disable_sriov(cdev->pdev);
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *hwfn = &cdev->hwfns[i];
+ struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+
+		/* Failure to acquire the ptt in 100g creates an odd error
+		 * where the first engine has already released IOV.
+		 */
+ if (!ptt) {
+ DP_ERR(hwfn, "Failed to acquire ptt\n");
+ return -EBUSY;
+ }
+
+ /* Clean WFQ db and configure equal weight for all vports */
+ qed_clean_wfq_db(hwfn, ptt);
+
+ qed_for_each_vf(hwfn, j) {
+ int k;
+
+ if (!qed_iov_is_valid_vfid(hwfn, j, true))
+ continue;
+
+ /* Wait until VF is disabled before releasing */
+ for (k = 0; k < 100; k++) {
+ if (!qed_iov_is_vf_stopped(hwfn, j))
+ msleep(20);
+ else
+ break;
+ }
+
+ if (k < 100)
+ qed_iov_release_hw_for_vf(&cdev->hwfns[i],
+ ptt, j);
+ else
+ DP_ERR(hwfn,
+ "Timeout waiting for VF's FLR to end\n");
+ }
+
+ qed_ptt_release(hwfn, ptt);
+ }
+
+ qed_iov_set_vfs_to_disable(cdev, false);
+
+ return 0;
+}
+
+static int qed_sriov_enable(struct qed_dev *cdev, int num)
+{
+ struct qed_sb_cnt_info sb_cnt_info;
+ int i, j, rc;
+
+ if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
+ DP_NOTICE(cdev, "Can start at most %d VFs\n",
+ RESC_NUM(&cdev->hwfns[0], QED_VPORT) - 1);
+ return -EINVAL;
+ }
+
+ /* Initialize HW for VF access */
+ for_each_hwfn(cdev, j) {
+ struct qed_hwfn *hwfn = &cdev->hwfns[j];
+ struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+ int num_sbs = 0, limit = 16;
+
+ if (!ptt) {
+ DP_ERR(hwfn, "Failed to acquire ptt\n");
+ rc = -EBUSY;
+ goto err;
+ }
+
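+		/* Decide how many status blocks this hwfn may hand out to
+		 * VFs; in default MF mode the engine-wide VF budget is split
+		 * between the PFs sharing the engine.
+		 */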
+ if (IS_MF_DEFAULT(hwfn))
+ limit = MAX_NUM_VFS_BB / hwfn->num_funcs_on_engine;
+
+ memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
+ qed_int_get_num_sbs(hwfn, &sb_cnt_info);
+ num_sbs = min_t(int, sb_cnt_info.sb_free_blk, limit);
+
+ for (i = 0; i < num; i++) {
+ if (!qed_iov_is_valid_vfid(hwfn, i, false))
+ continue;
+
+ rc = qed_iov_init_hw_for_vf(hwfn,
+ ptt, i, num_sbs / num);
+ if (rc) {
+ DP_ERR(cdev, "Failed to enable VF[%d]\n", i);
+ qed_ptt_release(hwfn, ptt);
+ goto err;
+ }
+ }
+
+ qed_ptt_release(hwfn, ptt);
+ }
+
+ /* Enable SRIOV PCIe functions */
+ rc = pci_enable_sriov(cdev->pdev, num);
+ if (rc) {
+ DP_ERR(cdev, "Failed to enable sriov [%d]\n", rc);
+ goto err;
+ }
+
+ return num;
+
+err:
+ qed_sriov_disable(cdev, false);
+ return rc;
+}
+
+static int qed_sriov_configure(struct qed_dev *cdev, int num_vfs_param)
+{
+ if (!IS_QED_SRIOV(cdev)) {
+ DP_VERBOSE(cdev, QED_MSG_IOV, "SR-IOV is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (num_vfs_param)
+ return qed_sriov_enable(cdev, num_vfs_param);
+ else
+ return qed_sriov_disable(cdev, true);
+}
+
+static int qed_sriov_pf_set_mac(struct qed_dev *cdev, u8 *mac, int vfid)
+{
+ int i;
+
+ if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
+ DP_VERBOSE(cdev, QED_MSG_IOV,
+ "Cannot set a VF MAC; Sriov is not enabled\n");
+ return -EINVAL;
+ }
+
+ if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) {
+ DP_VERBOSE(cdev, QED_MSG_IOV,
+ "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
+ return -EINVAL;
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *hwfn = &cdev->hwfns[i];
+ struct qed_public_vf_info *vf_info;
+
+ vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
+ if (!vf_info)
+ continue;
+
+ /* Set the forced MAC, and schedule the IOV task */
+ ether_addr_copy(vf_info->forced_mac, mac);
+ qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
+ }
+
+ return 0;
+}
+
+static int qed_sriov_pf_set_vlan(struct qed_dev *cdev, u16 vid, int vfid)
+{
+ int i;
+
+ if (!IS_QED_SRIOV(cdev) || !IS_PF_SRIOV_ALLOC(&cdev->hwfns[0])) {
+ DP_VERBOSE(cdev, QED_MSG_IOV,
+ "Cannot set a VF MAC; Sriov is not enabled\n");
+ return -EINVAL;
+ }
+
+ if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vfid, true)) {
+ DP_VERBOSE(cdev, QED_MSG_IOV,
+ "Cannot set VF[%d] MAC (VF is not active)\n", vfid);
+ return -EINVAL;
+ }
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *hwfn = &cdev->hwfns[i];
+ struct qed_public_vf_info *vf_info;
+
+ vf_info = qed_iov_get_public_vf_info(hwfn, vfid, true);
+ if (!vf_info)
+ continue;
+
+ /* Set the forced vlan, and schedule the IOV task */
+ vf_info->forced_vlan = vid;
+ qed_schedule_iov(hwfn, QED_IOV_WQ_SET_UNICAST_FILTER_FLAG);
+ }
+
+ return 0;
+}
+
+static int qed_get_vf_config(struct qed_dev *cdev,
+ int vf_id, struct ifla_vf_info *ivi)
+{
+ struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
+ struct qed_public_vf_info *vf_info;
+ struct qed_mcp_link_state link;
+ u32 tx_rate;
+
+ /* Sanitize request */
+ if (IS_VF(cdev))
+ return -EINVAL;
+
+ if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) {
+ DP_VERBOSE(cdev, QED_MSG_IOV,
+ "VF index [%d] isn't active\n", vf_id);
+ return -EINVAL;
+ }
+
+ vf_info = qed_iov_get_public_vf_info(hwfn, vf_id, true);
+
+ qed_iov_get_link(hwfn, vf_id, NULL, &link, NULL);
+
+ /* Fill information about VF */
+ ivi->vf = vf_id;
+
+ if (is_valid_ether_addr(vf_info->forced_mac))
+ ether_addr_copy(ivi->mac, vf_info->forced_mac);
+ else
+ ether_addr_copy(ivi->mac, vf_info->mac);
+
+ ivi->vlan = vf_info->forced_vlan;
+ ivi->spoofchk = qed_iov_spoofchk_get(hwfn, vf_id);
+ ivi->linkstate = vf_info->link_state;
+ tx_rate = vf_info->tx_rate;
+ ivi->max_tx_rate = tx_rate ? tx_rate : link.speed;
+ ivi->min_tx_rate = qed_iov_get_vf_min_rate(hwfn, vf_id);
+
+ return 0;
+}
+
+void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
+{
+ struct qed_mcp_link_capabilities caps;
+ struct qed_mcp_link_params params;
+ struct qed_mcp_link_state link;
+ int i;
+
+ if (!hwfn->pf_iov_info)
+ return;
+
+ /* Update bulletin of all future possible VFs with link configuration */
+ for (i = 0; i < hwfn->cdev->p_iov_info->total_vfs; i++) {
+ struct qed_public_vf_info *vf_info;
+
+ vf_info = qed_iov_get_public_vf_info(hwfn, i, false);
+ if (!vf_info)
+ continue;
+
+ memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
+ memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
+ memcpy(&caps, qed_mcp_get_link_capabilities(hwfn),
+ sizeof(caps));
+
+ /* Modify link according to the VF's configured link state */
+ switch (vf_info->link_state) {
+ case IFLA_VF_LINK_STATE_DISABLE:
+ link.link_up = false;
+ break;
+ case IFLA_VF_LINK_STATE_ENABLE:
+ link.link_up = true;
+			/* Set speed according to the maximum supported by HW -
+			 * that is 40G for regular devices and 100G for CMT
+			 * mode devices.
+			 */
+			link.speed = (hwfn->cdev->num_hwfns > 1) ?
+				     100000 : 40000;
+			break;
+ default:
+ /* In auto mode pass PF link image to VF */
+ break;
+ }
+
+ if (link.link_up && vf_info->tx_rate) {
+ struct qed_ptt *ptt;
+ int rate;
+
+ rate = min_t(int, vf_info->tx_rate, link.speed);
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt) {
+ DP_NOTICE(hwfn, "Failed to acquire PTT\n");
+ return;
+ }
+
+ if (!qed_iov_configure_tx_rate(hwfn, ptt, i, rate)) {
+ vf_info->tx_rate = rate;
+ link.speed = rate;
+ }
+
+ qed_ptt_release(hwfn, ptt);
+ }
+
+ qed_iov_set_link(hwfn, i, &params, &link, &caps);
+ }
+
+ qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+}
+
+static int qed_set_vf_link_state(struct qed_dev *cdev,
+ int vf_id, int link_state)
+{
+ int i;
+
+ /* Sanitize request */
+ if (IS_VF(cdev))
+ return -EINVAL;
+
+ if (!qed_iov_is_valid_vfid(&cdev->hwfns[0], vf_id, true)) {
+ DP_VERBOSE(cdev, QED_MSG_IOV,
+ "VF index [%d] isn't active\n", vf_id);
+ return -EINVAL;
+ }
+
+ /* Handle configuration of link state */
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *hwfn = &cdev->hwfns[i];
+ struct qed_public_vf_info *vf;
+
+ vf = qed_iov_get_public_vf_info(hwfn, vf_id, true);
+ if (!vf)
+ continue;
+
+ if (vf->link_state == link_state)
+ continue;
+
+ vf->link_state = link_state;
+ qed_inform_vf_link_state(&cdev->hwfns[i]);
+ }
+
+ return 0;
+}
+
+static int qed_spoof_configure(struct qed_dev *cdev, int vfid, bool val)
+{
+ int i, rc = -EINVAL;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+ rc = qed_iov_spoofchk_set(p_hwfn, vfid, val);
+ if (rc)
+ break;
+ }
+
+ return rc;
+}
+
+static int qed_configure_max_vf_rate(struct qed_dev *cdev, int vfid, int rate)
+{
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+ struct qed_public_vf_info *vf;
+
+ if (!qed_iov_pf_sanity_check(p_hwfn, vfid)) {
+ DP_NOTICE(p_hwfn,
+ "SR-IOV sanity check failed, can't set tx rate\n");
+ return -EINVAL;
+ }
+
+ vf = qed_iov_get_public_vf_info(p_hwfn, vfid, true);
+
+ vf->tx_rate = rate;
+
+ qed_inform_vf_link_state(p_hwfn);
+ }
+
+ return 0;
+}
+
+static int qed_set_vf_rate(struct qed_dev *cdev,
+ int vfid, u32 min_rate, u32 max_rate)
+{
+ int rc_min = 0, rc_max = 0;
+
+ if (max_rate)
+ rc_max = qed_configure_max_vf_rate(cdev, vfid, max_rate);
+
+ if (min_rate)
+ rc_min = qed_iov_configure_min_tx_rate(cdev, vfid, min_rate);
+
+ if (rc_max | rc_min)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void qed_handle_vf_msg(struct qed_hwfn *hwfn)
+{
+ u64 events[QED_VF_ARRAY_LENGTH];
+ struct qed_ptt *ptt;
+ int i;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt) {
+ DP_VERBOSE(hwfn, QED_MSG_IOV,
+ "Can't acquire PTT; re-scheduling\n");
+ qed_schedule_iov(hwfn, QED_IOV_WQ_MSG_FLAG);
+ return;
+ }
+
+ qed_iov_pf_get_and_clear_pending_events(hwfn, events);
+
+ DP_VERBOSE(hwfn, QED_MSG_IOV,
+ "Event mask of VF events: 0x%llx 0x%llx 0x%llx\n",
+ events[0], events[1], events[2]);
+
+ qed_for_each_vf(hwfn, i) {
+ /* Skip VFs with no pending messages */
+ if (!(events[i / 64] & (1ULL << (i % 64))))
+ continue;
+
+ DP_VERBOSE(hwfn, QED_MSG_IOV,
+ "Handling VF message from VF 0x%02x [Abs 0x%02x]\n",
+ i, hwfn->cdev->p_iov_info->first_vf_in_pf + i);
+
+ /* Copy VF's message to PF's request buffer for that VF */
+ if (qed_iov_copy_vf_msg(hwfn, ptt, i))
+ continue;
+
+ qed_iov_process_mbx_req(hwfn, ptt, i);
+ }
+
+ qed_ptt_release(hwfn, ptt);
+}
+
+static void qed_handle_pf_set_vf_unicast(struct qed_hwfn *hwfn)
+{
+ int i;
+
+ qed_for_each_vf(hwfn, i) {
+ struct qed_public_vf_info *info;
+ bool update = false;
+ u8 *mac;
+
+ info = qed_iov_get_public_vf_info(hwfn, i, true);
+ if (!info)
+ continue;
+
+ /* Update data on bulletin board */
+ mac = qed_iov_bulletin_get_forced_mac(hwfn, i);
+ if (is_valid_ether_addr(info->forced_mac) &&
+ (!mac || !ether_addr_equal(mac, info->forced_mac))) {
+ DP_VERBOSE(hwfn,
+ QED_MSG_IOV,
+ "Handling PF setting of VF MAC to VF 0x%02x [Abs 0x%02x]\n",
+ i,
+ hwfn->cdev->p_iov_info->first_vf_in_pf + i);
+
+ /* Update bulletin board with forced MAC */
+ qed_iov_bulletin_set_forced_mac(hwfn,
+ info->forced_mac, i);
+ update = true;
+ }
+
+ if (qed_iov_bulletin_get_forced_vlan(hwfn, i) ^
+ info->forced_vlan) {
+ DP_VERBOSE(hwfn,
+ QED_MSG_IOV,
+ "Handling PF setting of pvid [0x%04x] to VF 0x%02x [Abs 0x%02x]\n",
+ info->forced_vlan,
+ i,
+ hwfn->cdev->p_iov_info->first_vf_in_pf + i);
+ qed_iov_bulletin_set_forced_vlan(hwfn,
+ info->forced_vlan, i);
+ update = true;
+ }
+
+ if (update)
+ qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+ }
+}
+
+static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
+{
+ struct qed_ptt *ptt;
+ int i;
+
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt) {
+ DP_NOTICE(hwfn, "Failed allocating a ptt entry\n");
+ qed_schedule_iov(hwfn, QED_IOV_WQ_BULLETIN_UPDATE_FLAG);
+ return;
+ }
+
+ qed_for_each_vf(hwfn, i)
+ qed_iov_post_vf_bulletin(hwfn, i, ptt);
+
+ qed_ptt_release(hwfn, ptt);
+}
+
+void qed_iov_pf_task(struct work_struct *work)
+{
+ struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
+ iov_task.work);
+ int rc;
+
+ if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
+ return;
+
+ if (test_and_clear_bit(QED_IOV_WQ_FLR_FLAG, &hwfn->iov_task_flags)) {
+ struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+
+ if (!ptt) {
+ qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
+ return;
+ }
+
+ rc = qed_iov_vf_flr_cleanup(hwfn, ptt);
+ if (rc)
+ qed_schedule_iov(hwfn, QED_IOV_WQ_FLR_FLAG);
+
+ qed_ptt_release(hwfn, ptt);
+ }
+
+ if (test_and_clear_bit(QED_IOV_WQ_MSG_FLAG, &hwfn->iov_task_flags))
+ qed_handle_vf_msg(hwfn);
+
+ if (test_and_clear_bit(QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
+ &hwfn->iov_task_flags))
+ qed_handle_pf_set_vf_unicast(hwfn);
+
+ if (test_and_clear_bit(QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
+ &hwfn->iov_task_flags))
+ qed_handle_bulletin_post(hwfn);
+}
+
+void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
+{
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ if (!cdev->hwfns[i].iov_wq)
+ continue;
+
+ if (schedule_first) {
+ qed_schedule_iov(&cdev->hwfns[i],
+ QED_IOV_WQ_STOP_WQ_FLAG);
+ cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
+ }
+
+ flush_workqueue(cdev->hwfns[i].iov_wq);
+ destroy_workqueue(cdev->hwfns[i].iov_wq);
+ }
+}
+
+int qed_iov_wq_start(struct qed_dev *cdev)
+{
+ char name[NAME_SIZE];
+ int i;
+
+ for_each_hwfn(cdev, i) {
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
+
+		/* PFs need a dedicated workqueue only if they support IOV.
+		 * VFs always require one.
+		 */
+ if (IS_PF(p_hwfn->cdev) && !IS_PF_SRIOV(p_hwfn))
+ continue;
+
+ snprintf(name, NAME_SIZE, "iov-%02x:%02x.%02x",
+ cdev->pdev->bus->number,
+ PCI_SLOT(cdev->pdev->devfn), p_hwfn->abs_pf_id);
+
+ p_hwfn->iov_wq = create_singlethread_workqueue(name);
+ if (!p_hwfn->iov_wq) {
+ DP_NOTICE(p_hwfn, "Cannot create iov workqueue\n");
+ return -ENOMEM;
+ }
+
+ if (IS_PF(cdev))
+ INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_pf_task);
+ else
+ INIT_DELAYED_WORK(&p_hwfn->iov_task, qed_iov_vf_task);
+ }
+
+ return 0;
+}
+
+const struct qed_iov_hv_ops qed_iov_ops_pass = {
+ .configure = &qed_sriov_configure,
+ .set_mac = &qed_sriov_pf_set_mac,
+ .set_vlan = &qed_sriov_pf_set_vlan,
+ .get_config = &qed_get_vf_config,
+ .set_link_state = &qed_set_vf_link_state,
+ .set_spoof = &qed_spoof_configure,
+ .set_rate = &qed_set_vf_rate,
+};
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
new file mode 100644
index 000000000000..0dd23e409b3f
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -0,0 +1,395 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_SRIOV_H
+#define _QED_SRIOV_H
+#include <linux/types.h>
+#include "qed_vf.h"
+
+#define QED_ETH_VF_NUM_MAC_FILTERS 1
+#define QED_ETH_VF_NUM_VLAN_FILTERS 2
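+/* Length, in u64 words, of the per-VF pending-events/FLR bitmaps */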
+#define QED_VF_ARRAY_LENGTH (3)
+
+#ifdef CONFIG_QED_SRIOV
+#define IS_VF(cdev) ((cdev)->b_is_vf)
+#define IS_PF(cdev) (!((cdev)->b_is_vf))
+#define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->cdev->p_iov_info))
+#else
+#define IS_VF(cdev) (0)
+#define IS_PF(cdev) (1)
+#define IS_PF_SRIOV(p_hwfn) (0)
+#endif
+#define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info))
+
+#define QED_MAX_VF_CHAINS_PER_PF 16
+
+#define QED_ETH_MAX_VF_NUM_VLAN_FILTERS \
+ (MAX_NUM_VFS * QED_ETH_VF_NUM_VLAN_FILTERS)
+
+enum qed_iov_vport_update_flag {
+ QED_IOV_VP_UPDATE_ACTIVATE,
+ QED_IOV_VP_UPDATE_VLAN_STRIP,
+ QED_IOV_VP_UPDATE_TX_SWITCH,
+ QED_IOV_VP_UPDATE_MCAST,
+ QED_IOV_VP_UPDATE_ACCEPT_PARAM,
+ QED_IOV_VP_UPDATE_RSS,
+ QED_IOV_VP_UPDATE_ACCEPT_ANY_VLAN,
+ QED_IOV_VP_UPDATE_SGE_TPA,
+ QED_IOV_VP_UPDATE_MAX,
+};
+
+struct qed_public_vf_info {
+ /* These copies will later be reflected in the bulletin board,
+ * but this copy should be newer.
+ */
+ u8 forced_mac[ETH_ALEN];
+ u16 forced_vlan;
+ u8 mac[ETH_ALEN];
+
+ /* IFLA_VF_LINK_STATE_<X> */
+ int link_state;
+
+ /* Currently configured Tx rate in MB/sec. 0 if unconfigured */
+ int tx_rate;
+};
+
+/* This struct is part of qed_dev and contains data relevant to all hwfns;
+ * Initialized only if SR-IOV capability is exposed in PCIe config space.
+ */
+struct qed_hw_sriov_info {
+ int pos; /* capability position */
+ int nres; /* number of resources */
+ u32 cap; /* SR-IOV Capabilities */
+ u16 ctrl; /* SR-IOV Control */
+ u16 total_vfs; /* total VFs associated with the PF */
+ u16 num_vfs; /* number of vfs that have been started */
+ u16 initial_vfs; /* initial VFs associated with the PF */
+ u16 nr_virtfn; /* number of VFs available */
+ u16 offset; /* first VF Routing ID offset */
+ u16 stride; /* following VF stride */
+ u16 vf_device_id; /* VF device id */
+ u32 pgsz; /* page size for BAR alignment */
+ u8 link; /* Function Dependency Link */
+
+ u32 first_vf_in_pf;
+};
+
+/* This mailbox is maintained per VF in its PF, and contains all information
+ * required for sending / receiving a message.
+ */
+struct qed_iov_vf_mbx {
+ union vfpf_tlvs *req_virt;
+ dma_addr_t req_phys;
+ union pfvf_tlvs *reply_virt;
+ dma_addr_t reply_phys;
+
+ /* Address in VF where a pending message is located */
+ dma_addr_t pending_req;
+
+ u8 *offset;
+
+ /* saved VF request header */
+ struct vfpf_first_tlv first_tlv;
+};
+
+struct qed_vf_q_info {
+ u16 fw_rx_qid;
+ u16 fw_tx_qid;
+ u8 fw_cid;
+ u8 rxq_active;
+ u8 txq_active;
+};
+
+enum vf_state {
+	VF_FREE = 0,		/* VF ready to be acquired; holds no resources */
+	VF_ACQUIRED,		/* VF acquired, but not initialized */
+ VF_ENABLED, /* VF, Enabled */
+ VF_RESET, /* VF, FLR'd, pending cleanup */
+ VF_STOPPED /* VF, Stopped */
+};
+
+struct qed_vf_vlan_shadow {
+ bool used;
+ u16 vid;
+};
+
+struct qed_vf_shadow_config {
+ /* Shadow copy of all guest vlans */
+ struct qed_vf_vlan_shadow vlans[QED_ETH_VF_NUM_VLAN_FILTERS + 1];
+
+ /* Shadow copy of all configured MACs; Empty if forcing MACs */
+ u8 macs[QED_ETH_VF_NUM_MAC_FILTERS][ETH_ALEN];
+ u8 inner_vlan_removal;
+};
+
+/* PFs maintain an array of this structure, per VF */
+struct qed_vf_info {
+ struct qed_iov_vf_mbx vf_mbx;
+ enum vf_state state;
+ bool b_init;
+ u8 to_disable;
+
+ struct qed_bulletin bulletin;
+ dma_addr_t vf_bulletin;
+
+ /* PF saves a copy of the last VF acquire message */
+ struct vfpf_acquire_tlv acquire;
+
+ u32 concrete_fid;
+ u16 opaque_fid;
+ u16 mtu;
+
+ u8 vport_id;
+ u8 relative_vf_id;
+ u8 abs_vf_id;
+#define QED_VF_ABS_ID(p_hwfn, p_vf) (QED_PATH_ID(p_hwfn) ? \
+ (p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \
+ (p_vf)->abs_vf_id)
+
+ u8 vport_instance;
+ u8 num_rxqs;
+ u8 num_txqs;
+
+ u8 num_sbs;
+
+ u8 num_mac_filters;
+ u8 num_vlan_filters;
+ struct qed_vf_q_info vf_queues[QED_MAX_VF_CHAINS_PER_PF];
+ u16 igu_sbs[QED_MAX_VF_CHAINS_PER_PF];
+ u8 num_active_rxqs;
+ struct qed_public_vf_info p_vf_info;
+ bool spoof_chk;
+ bool req_spoofchk_val;
+
+ /* Stores the configuration requested by VF */
+ struct qed_vf_shadow_config shadow_config;
+
+ /* A bitfield using bulletin's valid-map bits, used to indicate
+ * which of the bulletin board features have been configured.
+ */
+ u64 configured_features;
+#define QED_IOV_CONFIGURED_FEATURES_MASK ((1 << MAC_ADDR_FORCED) | \
+ (1 << VLAN_ADDR_FORCED))
+};
+
+/* This structure is part of qed_hwfn and used only for PFs that have sriov
+ * capability enabled.
+ */
+struct qed_pf_iov {
+ struct qed_vf_info vfs_array[MAX_NUM_VFS];
+ u64 pending_events[QED_VF_ARRAY_LENGTH];
+ u64 pending_flr[QED_VF_ARRAY_LENGTH];
+
+	/* Message buffers are allocated contiguously and split among the VFs */
+ void *mbx_msg_virt_addr;
+ dma_addr_t mbx_msg_phys_addr;
+ u32 mbx_msg_size;
+ void *mbx_reply_virt_addr;
+ dma_addr_t mbx_reply_phys_addr;
+ u32 mbx_reply_size;
+ void *p_bulletins;
+ dma_addr_t bulletins_phys;
+ u32 bulletins_size;
+};
+
+enum qed_iov_wq_flag {
+ QED_IOV_WQ_MSG_FLAG,
+ QED_IOV_WQ_SET_UNICAST_FILTER_FLAG,
+ QED_IOV_WQ_BULLETIN_UPDATE_FLAG,
+ QED_IOV_WQ_STOP_WQ_FLAG,
+ QED_IOV_WQ_FLR_FLAG,
+};
+
+#ifdef CONFIG_QED_SRIOV
+/**
+ * @brief - Given a VF index, return index of next [including that] active VF.
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return MAX_NUM_VFS in case no further active VFs, otherwise index.
+ */
+u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id);
+
+/**
+ * @brief Read SR-IOV related information and allocate resources;
+ * reads from configuration space, shmem, etc.
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_iov_hw_info(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_add_tlv - place a given tlv on the tlv buffer at next offset
+ *
+ * @param p_hwfn
+ * @param p_iov
+ * @param type
+ * @param length
+ *
+ * @return pointer to the newly placed tlv
+ */
+void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length);
+
+/**
+ * @brief list the types and lengths of the tlvs on the buffer
+ *
+ * @param p_hwfn
+ * @param tlvs_list
+ */
+void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list);
+
+/**
+ * @brief qed_iov_alloc - allocate sriov related resources
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_iov_alloc(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_iov_setup - setup sriov related resources
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ */
+void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
+/**
+ * @brief qed_iov_free - free sriov related resources
+ *
+ * @param p_hwfn
+ */
+void qed_iov_free(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief free sriov related memory that was allocated during hw_prepare
+ *
+ * @param cdev
+ */
+void qed_iov_free_hw_info(struct qed_dev *cdev);
+
+/**
+ * @brief qed_sriov_eqe_event - handle async sriov event arrived on eqe.
+ *
+ * @param p_hwfn
+ * @param opcode
+ * @param echo
+ * @param data
+ */
+int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
+ u8 opcode, __le16 echo, union event_ring_data *data);
+
+/**
+ * @brief Mark structs of vfs that have been FLR-ed.
+ *
+ * @param p_hwfn
+ * @param disabled_vfs - bitmask of all VFs on path that were FLRed
+ *
+ * @return 1 iff one of the PF's vfs got FLRed. 0 otherwise.
+ */
+int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs);
+
+/**
+ * @brief Search extended TLVs in request/reply buffer.
+ *
+ * @param p_hwfn
+ * @param p_tlvs_list - Pointer to tlvs list
+ * @param req_type - Type of TLV
+ *
+ * @return pointer to tlv type if found, otherwise returns NULL.
+ */
+void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
+ void *p_tlvs_list, u16 req_type);
+
+void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first);
+int qed_iov_wq_start(struct qed_dev *cdev);
+
+void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag);
+void qed_vf_start_iov_wq(struct qed_dev *cdev);
+int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled);
+void qed_inform_vf_link_state(struct qed_hwfn *hwfn);
+#else
+static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
+ u16 rel_vf_id)
+{
+ return MAX_NUM_VFS;
+}
+
+static inline int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
+{
+ return 0;
+}
+
+static inline int qed_iov_alloc(struct qed_hwfn *p_hwfn)
+{
+ return 0;
+}
+
+static inline void qed_iov_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+}
+
+static inline void qed_iov_free(struct qed_hwfn *p_hwfn)
+{
+}
+
+static inline void qed_iov_free_hw_info(struct qed_dev *cdev)
+{
+}
+
+static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
+ u8 opcode,
+ __le16 echo, union event_ring_data *data)
+{
+ return -EINVAL;
+}
+
+static inline int qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn,
+ u32 *disabled_vfs)
+{
+ return 0;
+}
+
+static inline void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
+{
+}
+
+static inline int qed_iov_wq_start(struct qed_dev *cdev)
+{
+ return 0;
+}
+
+static inline void qed_schedule_iov(struct qed_hwfn *hwfn,
+ enum qed_iov_wq_flag flag)
+{
+}
+
+static inline void qed_vf_start_iov_wq(struct qed_dev *cdev)
+{
+}
+
+static inline int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
+{
+ return 0;
+}
+
+static inline void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
+{
+}
+#endif
+
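+/* Iterate over the relative ids of all currently-active VFs */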
+#define qed_for_each_vf(_p_hwfn, _i) \
+ for (_i = qed_iov_get_next_active_vf(_p_hwfn, 0); \
+ _i < MAX_NUM_VFS; \
+ _i = qed_iov_get_next_active_vf(_p_hwfn, _i + 1))
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
new file mode 100644
index 000000000000..9b780b31b15c
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -0,0 +1,1141 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/crc32.h>
+#include <linux/etherdevice.h>
+#include "qed.h"
+#include "qed_sriov.h"
+#include "qed_vf.h"
+
+static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ void *p_tlv;
+
+ /* This lock is released when we receive PF's response
+ * in qed_send_msg2pf().
+ * So, qed_vf_pf_prep() and qed_send_msg2pf()
+ * must come in sequence.
+ */
+ mutex_lock(&(p_iov->mutex));
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "preparing to send 0x%04x tlv over vf pf channel\n",
+ type);
+
+	/* Reset request offset */
+ p_iov->offset = (u8 *)p_iov->vf2pf_request;
+
+ /* Clear mailbox - both request and reply */
+ memset(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs));
+ memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
+
+ /* Init type and length */
+ p_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, type, length);
+
+ /* Init first tlv header */
+ ((struct vfpf_first_tlv *)p_tlv)->reply_address =
+ (u64)p_iov->pf2vf_reply_phys;
+
+ return p_tlv;
+}
+
+static int qed_send_msg2pf(struct qed_hwfn *p_hwfn, u8 *done, u32 resp_size)
+{
+ union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request;
+ struct ustorm_trigger_vf_zone trigger;
+ struct ustorm_vf_zone *zone_data;
+ int rc = 0, time = 100;
+
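+	/* A VF accesses the ustorm trigger zone at a fixed offset within
+	 * its BAR0, so plain REG_WR()/REG_RD() suffice - no PTT window.
+	 */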
+ zone_data = (struct ustorm_vf_zone *)PXP_VF_BAR0_START_USDM_ZONE_B;
+
+ /* output tlvs list */
+ qed_dp_tlv_list(p_hwfn, p_req);
+
+ /* need to add the END TLV to the message size */
+ resp_size += sizeof(struct channel_list_end_tlv);
+
+ /* Send TLVs over HW channel */
+ memset(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone));
+ trigger.vf_pf_msg_valid = 1;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF -> PF [%02x] message: [%08x, %08x] --> %p, %08x --> %p\n",
+ GET_FIELD(p_hwfn->hw_info.concrete_fid,
+ PXP_CONCRETE_FID_PFID),
+ upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
+ lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys),
+ &zone_data->non_trigger.vf_pf_msg_addr,
+ *((u32 *)&trigger), &zone_data->trigger);
+
+ REG_WR(p_hwfn,
+ (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo,
+ lower_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));
+
+ REG_WR(p_hwfn,
+ (uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi,
+ upper_32_bits(p_hwfn->vf_iov_info->vf2pf_request_phys));
+
+ /* The message data must be written first, to prevent trigger before
+ * data is written.
+ */
+ wmb();
+
+ REG_WR(p_hwfn, (uintptr_t)&zone_data->trigger, *((u32 *)&trigger));
+
+	/* When the PF is done with the response, it writes back to the
+	 * `done' address. Poll until then.
+	 */
+ while ((!*done) && time) {
+ msleep(25);
+ time--;
+ }
+
+ if (!*done) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF <-- PF Timeout [Type %d]\n",
+ p_req->first_tlv.tl.type);
+ rc = -EBUSY;
+ goto exit;
+ } else {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "PF response: %d [Type %d]\n",
+ *done, p_req->first_tlv.tl.type);
+ }
+
+exit:
+ mutex_unlock(&(p_hwfn->vf_iov_info->mutex));
+
+ return rc;
+}
+
+#define VF_ACQUIRE_THRESH 3
+static void qed_vf_pf_acquire_reduce_resc(struct qed_hwfn *p_hwfn,
+ struct vf_pf_resc_request *p_req,
+ struct pf_vf_resc *p_resp)
+{
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "PF unwilling to fullill resource request: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x]. Try PF recommended amount\n",
+ p_req->num_rxqs,
+ p_resp->num_rxqs,
+ p_req->num_rxqs,
+ p_resp->num_txqs,
+ p_req->num_sbs,
+ p_resp->num_sbs,
+ p_req->num_mac_filters,
+ p_resp->num_mac_filters,
+ p_req->num_vlan_filters,
+ p_resp->num_vlan_filters,
+ p_req->num_mc_filters, p_resp->num_mc_filters);
+
+ /* humble our request */
+ p_req->num_txqs = p_resp->num_txqs;
+ p_req->num_rxqs = p_resp->num_rxqs;
+ p_req->num_sbs = p_resp->num_sbs;
+ p_req->num_mac_filters = p_resp->num_mac_filters;
+ p_req->num_vlan_filters = p_resp->num_vlan_filters;
+ p_req->num_mc_filters = p_resp->num_mc_filters;
+}
+
+static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
+ struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
+ struct vf_pf_resc_request *p_resc;
+ bool resources_acquired = false;
+ struct vfpf_acquire_tlv *req;
+ int rc = 0, attempts = 0;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req));
+ p_resc = &req->resc_request;
+
+	/* start filling the request */
+ req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid;
+
+ p_resc->num_rxqs = QED_MAX_VF_CHAINS_PER_PF;
+ p_resc->num_txqs = QED_MAX_VF_CHAINS_PER_PF;
+ p_resc->num_sbs = QED_MAX_VF_CHAINS_PER_PF;
+ p_resc->num_mac_filters = QED_ETH_VF_NUM_MAC_FILTERS;
+ p_resc->num_vlan_filters = QED_ETH_VF_NUM_VLAN_FILTERS;
+
+ req->vfdev_info.os_type = VFPF_ACQUIRE_OS_LINUX;
+ req->vfdev_info.fw_major = FW_MAJOR_VERSION;
+ req->vfdev_info.fw_minor = FW_MINOR_VERSION;
+ req->vfdev_info.fw_revision = FW_REVISION_VERSION;
+ req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION;
+ req->vfdev_info.eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
+ req->vfdev_info.eth_fp_hsi_minor = ETH_HSI_VER_MINOR;
+
+ /* Fill capability field with any non-deprecated config we support */
+ req->vfdev_info.capabilities |= VFPF_ACQUIRE_CAP_100G;
+
+ /* pf 2 vf bulletin board address */
+ req->bulletin_addr = p_iov->bulletin.phys;
+ req->bulletin_size = p_iov->bulletin.size;
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
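+	/* Negotiation loop: if the PF answers NO_RESOURCE it also supplies
+	 * the amounts it is willing to grant; retry with those, up to
+	 * VF_ACQUIRE_THRESH attempts.
+	 */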
+ while (!resources_acquired) {
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV, "attempting to acquire resources\n");
+
+ /* send acquire request */
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ /* copy acquire response from buffer to p_hwfn */
+ memcpy(&p_iov->acquire_resp, resp, sizeof(p_iov->acquire_resp));
+
+ attempts++;
+
+ if (resp->hdr.status == PFVF_STATUS_SUCCESS) {
+ /* PF agrees to allocate our resources */
+ if (!(resp->pfdev_info.capabilities &
+ PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE)) {
+ DP_INFO(p_hwfn,
+ "PF is using old incompatible driver; Either downgrade driver or request provider to update hypervisor version\n");
+ return -EINVAL;
+ }
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV, "resources acquired\n");
+ resources_acquired = true;
+ } else if (resp->hdr.status == PFVF_STATUS_NO_RESOURCE &&
+ attempts < VF_ACQUIRE_THRESH) {
+ qed_vf_pf_acquire_reduce_resc(p_hwfn, p_resc,
+ &resp->resc);
+
+ /* Clear response buffer */
+ memset(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs));
+ } else if ((resp->hdr.status == PFVF_STATUS_NOT_SUPPORTED) &&
+ pfdev_info->major_fp_hsi &&
+ (pfdev_info->major_fp_hsi != ETH_HSI_VER_MAJOR)) {
+ DP_NOTICE(p_hwfn,
+ "PF uses an incompatible fastpath HSI %02x.%02x [VF requires %02x.%02x]. Please change to a VF driver using %02x.xx.\n",
+ pfdev_info->major_fp_hsi,
+ pfdev_info->minor_fp_hsi,
+ ETH_HSI_VER_MAJOR,
+ ETH_HSI_VER_MINOR, pfdev_info->major_fp_hsi);
+ return -EINVAL;
+ } else {
+ DP_ERR(p_hwfn,
+ "PF returned error %d to VF acquisition request\n",
+ resp->hdr.status);
+ return -EAGAIN;
+ }
+ }
+
+ /* Update bulletin board size with response from PF */
+ p_iov->bulletin.size = resp->bulletin_size;
+
+ /* get HW info */
+ p_hwfn->cdev->type = resp->pfdev_info.dev_type;
+ p_hwfn->cdev->chip_rev = resp->pfdev_info.chip_rev;
+
+ p_hwfn->cdev->chip_num = pfdev_info->chip_num & 0xffff;
+
+ /* Learn of the possibility of CMT */
+ if (IS_LEAD_HWFN(p_hwfn)) {
+ if (resp->pfdev_info.capabilities & PFVF_ACQUIRE_CAP_100G) {
+ DP_NOTICE(p_hwfn, "100g VF\n");
+ p_hwfn->cdev->num_hwfns = 2;
+ }
+ }
+
+ if (ETH_HSI_VER_MINOR &&
+ (resp->pfdev_info.minor_fp_hsi < ETH_HSI_VER_MINOR)) {
+ DP_INFO(p_hwfn,
+ "PF is using older fastpath HSI; %02x.%02x is configured\n",
+ ETH_HSI_VER_MAJOR, resp->pfdev_info.minor_fp_hsi);
+ }
+
+ return 0;
+}
+
+int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
+{
+ struct qed_vf_iov *p_iov;
+ u32 reg;
+
+	/* Set number of hwfns - might be overridden once the leading hwfn
+	 * learns the actual configuration from the PF.
+	 */
+ */
+ if (IS_LEAD_HWFN(p_hwfn))
+ p_hwfn->cdev->num_hwfns = 1;
+
+ /* Set the doorbell bar. Assumption: regview is set */
+ p_hwfn->doorbells = (u8 __iomem *)p_hwfn->regview +
+ PXP_VF_BAR0_START_DQ;
+
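+	/* The opaque and concrete FIDs are read from the ME registers the
+	 * PF exposes at fixed offsets in the VF's BAR0.
+	 */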
+ reg = PXP_VF_BAR0_ME_OPAQUE_ADDRESS;
+ p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, reg);
+
+ reg = PXP_VF_BAR0_ME_CONCRETE_ADDRESS;
+ p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, reg);
+
+ /* Allocate vf sriov info */
+ p_iov = kzalloc(sizeof(*p_iov), GFP_KERNEL);
+ if (!p_iov) {
+ DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_sriov'\n");
+ return -ENOMEM;
+ }
+
+ /* Allocate vf2pf msg */
+ p_iov->vf2pf_request = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(union vfpf_tlvs),
+ &p_iov->vf2pf_request_phys,
+ GFP_KERNEL);
+ if (!p_iov->vf2pf_request) {
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate `vf2pf_request' DMA memory\n");
+ goto free_p_iov;
+ }
+
+ p_iov->pf2vf_reply = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(union pfvf_tlvs),
+ &p_iov->pf2vf_reply_phys,
+ GFP_KERNEL);
+ if (!p_iov->pf2vf_reply) {
+ DP_NOTICE(p_hwfn,
+ "Failed to allocate `pf2vf_reply' DMA memory\n");
+ goto free_vf2pf_request;
+ }
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_IOV,
+ "VF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys]\n",
+ p_iov->vf2pf_request,
+ (u64) p_iov->vf2pf_request_phys,
+ p_iov->pf2vf_reply, (u64)p_iov->pf2vf_reply_phys);
+
+ /* Allocate Bulletin board */
+ p_iov->bulletin.size = sizeof(struct qed_bulletin_content);
+ p_iov->bulletin.p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+ p_iov->bulletin.size,
+ &p_iov->bulletin.phys,
+						    GFP_KERNEL);
+	if (!p_iov->bulletin.p_virt) {
+		DP_NOTICE(p_hwfn,
+			  "Failed to allocate bulletin board DMA memory\n");
+		goto free_pf2vf_reply;
+	}
+
+	DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF's bulletin Board [%p virt 0x%llx phys 0x%08x bytes]\n",
+ p_iov->bulletin.p_virt,
+ (u64)p_iov->bulletin.phys, p_iov->bulletin.size);
+
+ mutex_init(&p_iov->mutex);
+
+ p_hwfn->vf_iov_info = p_iov;
+
+ p_hwfn->hw_info.personality = QED_PCI_ETH;
+
+ return qed_vf_pf_acquire(p_hwfn);
+
+free_pf2vf_reply:
+	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+			  sizeof(union pfvf_tlvs),
+			  p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);
+free_vf2pf_request:
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(union vfpf_tlvs),
+ p_iov->vf2pf_request, p_iov->vf2pf_request_phys);
+free_p_iov:
+ kfree(p_iov);
+
+ return -ENOMEM;
+}
+
+int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
+ u8 rx_qid,
+ u16 sb,
+ u8 sb_index,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size, void __iomem **pp_prod)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_start_queue_resp_tlv *resp;
+ struct vfpf_start_rxq_tlv *req;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req));
+
+ req->rx_qid = rx_qid;
+ req->cqe_pbl_addr = cqe_pbl_addr;
+ req->cqe_pbl_size = cqe_pbl_size;
+ req->rxq_addr = bd_chain_phys_addr;
+ req->hw_sb = sb;
+ req->sb_index = sb_index;
+ req->bd_max_bytes = bd_max_bytes;
+ req->stat_id = -1;
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->queue_start;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return -EINVAL;
+
+ /* Learn the address of the producer from the response */
+ if (pp_prod) {
+ u32 init_prod_val = 0;
+
+ *pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset;
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Rxq[0x%02x]: producer at %p [offset 0x%08x]\n",
+ rx_qid, *pp_prod, resp->offset);
+
+ /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
+ __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
+ (u32 *)&init_prod_val);
+ }
+
+ return rc;
+}
+
+int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, u16 rx_qid, bool cqe_completion)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_stop_rxqs_tlv *req;
+ struct pfvf_def_resp_tlv *resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));
+
+ req->rx_qid = rx_qid;
+ req->num_rxqs = 1;
+ req->cqe_completion = cqe_completion;
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return -EINVAL;
+
+ return rc;
+}
+
+int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
+ u16 tx_queue_id,
+ u16 sb,
+ u8 sb_index,
+ dma_addr_t pbl_addr,
+ u16 pbl_size, void __iomem **pp_doorbell)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_start_queue_resp_tlv *resp;
+ struct vfpf_start_txq_tlv *req;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));
+
+ req->tx_qid = tx_queue_id;
+
+ /* Tx */
+ req->pbl_addr = pbl_addr;
+ req->pbl_size = pbl_size;
+ req->hw_sb = sb;
+ req->sb_index = sb_index;
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->queue_start;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ goto exit;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS) {
+ rc = -EINVAL;
+ goto exit;
+ }
+
+ if (pp_doorbell) {
+ *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + resp->offset;
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
+ tx_queue_id, *pp_doorbell, resp->offset);
+ }
+exit:
+ return rc;
+}
+
+int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_stop_txqs_tlv *req;
+ struct pfvf_def_resp_tlv *resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));
+
+ req->tx_qid = tx_qid;
+ req->num_txqs = 1;
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return -EINVAL;
+
+ return rc;
+}
+
+int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
+ u8 vport_id,
+ u16 mtu,
+ u8 inner_vlan_removal,
+ enum qed_tpa_mode tpa_mode,
+ u8 max_buffers_per_cqe, u8 only_untagged)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_vport_start_tlv *req;
+ struct pfvf_def_resp_tlv *resp;
+ int rc, i;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req));
+
+ req->mtu = mtu;
+ req->vport_id = vport_id;
+ req->inner_vlan_removal = inner_vlan_removal;
+ req->tpa_mode = tpa_mode;
+ req->max_buffers_per_cqe = max_buffers_per_cqe;
+ req->only_untagged = only_untagged;
+
+ /* status blocks */
+ for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++)
+ if (p_hwfn->sbs_info[i])
+ req->sb_addr[i] = p_hwfn->sbs_info[i]->sb_phys;
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return -EINVAL;
+
+ return rc;
+}
+
+int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN,
+ sizeof(struct vfpf_first_tlv));
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return -EINVAL;
+
+ return rc;
+}
+
+static bool
+qed_vf_handle_vp_update_is_needed(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_data,
+ u16 tlv)
+{
+ switch (tlv) {
+ case CHANNEL_TLV_VPORT_UPDATE_ACTIVATE:
+ return !!(p_data->update_vport_active_rx_flg ||
+ p_data->update_vport_active_tx_flg);
+ case CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH:
+ return !!p_data->update_tx_switching_flg;
+ case CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP:
+ return !!p_data->update_inner_vlan_removal_flg;
+ case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN:
+ return !!p_data->update_accept_any_vlan_flg;
+ case CHANNEL_TLV_VPORT_UPDATE_MCAST:
+ return !!p_data->update_approx_mcast_flg;
+ case CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM:
+ return !!(p_data->accept_flags.update_rx_mode_config ||
+ p_data->accept_flags.update_tx_mode_config);
+ case CHANNEL_TLV_VPORT_UPDATE_RSS:
+ return !!p_data->rss_params;
+ case CHANNEL_TLV_VPORT_UPDATE_SGE_TPA:
+ return !!p_data->sge_tpa_params;
+ default:
+ DP_INFO(p_hwfn, "Unexpected vport-update TLV[%d]\n",
+ tlv);
+ return false;
+ }
+}
+
+static void
+qed_vf_handle_vp_update_tlvs_resp(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_data)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *p_resp;
+ u16 tlv;
+
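+ /* This iteration relies on the vport-update tlv values being
+ * sequential; see the comment near CHANNEL_TLV_VPORT_UPDATE_MAX
+ * in qed_vf.h.
+ */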
+ for (tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
+ tlv < CHANNEL_TLV_VPORT_UPDATE_MAX; tlv++) {
+ if (!qed_vf_handle_vp_update_is_needed(p_hwfn, p_data, tlv))
+ continue;
+
+ p_resp = (struct pfvf_def_resp_tlv *)
+ qed_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply,
+ tlv);
+ if (p_resp)
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "TLV[%d] Configuration %s\n",
+ tlv,
+ (p_resp->hdr.status == PFVF_STATUS_SUCCESS) ?
+ "succeeded" : "failed");
+ }
+}
+
+int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_params)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_vport_update_tlv *req;
+ struct pfvf_def_resp_tlv *resp;
+ u8 update_rx, update_tx;
+ u32 resp_size = 0;
+ u16 size, tlv;
+ int rc;
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ resp_size = sizeof(*resp);
+
+ update_rx = p_params->update_vport_active_rx_flg;
+ update_tx = p_params->update_vport_active_tx_flg;
+
+ /* clear mailbox and prep header tlv */
+ qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req));
+
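+ /* Each extended tlv appended below also adds one pfvf_def_resp_tlv
+ * to the PF's reply, which is why resp_size grows per tlv.
+ */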
+ /* Prepare extended tlvs */
+ if (update_rx || update_tx) {
+ struct vfpf_vport_update_activate_tlv *p_act_tlv;
+
+ size = sizeof(struct vfpf_vport_update_activate_tlv);
+ p_act_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
+ size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ if (update_rx) {
+ p_act_tlv->update_rx = update_rx;
+ p_act_tlv->active_rx = p_params->vport_active_rx_flg;
+ }
+
+ if (update_tx) {
+ p_act_tlv->update_tx = update_tx;
+ p_act_tlv->active_tx = p_params->vport_active_tx_flg;
+ }
+ }
+
+ if (p_params->update_tx_switching_flg) {
+ struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
+
+ size = sizeof(struct vfpf_vport_update_tx_switch_tlv);
+ tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
+ p_tx_switch_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
+ tlv, size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg;
+ }
+
+ if (p_params->update_approx_mcast_flg) {
+ struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
+
+ size = sizeof(struct vfpf_vport_update_mcast_bin_tlv);
+ p_mcast_tlv = qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_VPORT_UPDATE_MCAST, size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ memcpy(p_mcast_tlv->bins, p_params->bins,
+ sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
+ }
+
+ update_rx = p_params->accept_flags.update_rx_mode_config;
+ update_tx = p_params->accept_flags.update_tx_mode_config;
+
+ if (update_rx || update_tx) {
+ struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
+
+ tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
+ size = sizeof(struct vfpf_vport_update_accept_param_tlv);
+ p_accept_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ if (update_rx) {
+ p_accept_tlv->update_rx_mode = update_rx;
+ p_accept_tlv->rx_accept_filter =
+ p_params->accept_flags.rx_accept_filter;
+ }
+
+ if (update_tx) {
+ p_accept_tlv->update_tx_mode = update_tx;
+ p_accept_tlv->tx_accept_filter =
+ p_params->accept_flags.tx_accept_filter;
+ }
+ }
+
+ if (p_params->rss_params) {
+ struct qed_rss_params *rss_params = p_params->rss_params;
+ struct vfpf_vport_update_rss_tlv *p_rss_tlv;
+
+ size = sizeof(struct vfpf_vport_update_rss_tlv);
+ p_rss_tlv = qed_add_tlv(p_hwfn,
+ &p_iov->offset,
+ CHANNEL_TLV_VPORT_UPDATE_RSS, size);
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+
+ if (rss_params->update_rss_config)
+ p_rss_tlv->update_rss_flags |=
+ VFPF_UPDATE_RSS_CONFIG_FLAG;
+ if (rss_params->update_rss_capabilities)
+ p_rss_tlv->update_rss_flags |=
+ VFPF_UPDATE_RSS_CAPS_FLAG;
+ if (rss_params->update_rss_ind_table)
+ p_rss_tlv->update_rss_flags |=
+ VFPF_UPDATE_RSS_IND_TABLE_FLAG;
+ if (rss_params->update_rss_key)
+ p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG;
+
+ p_rss_tlv->rss_enable = rss_params->rss_enable;
+ p_rss_tlv->rss_caps = rss_params->rss_caps;
+ p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log;
+ memcpy(p_rss_tlv->rss_ind_table, rss_params->rss_ind_table,
+ sizeof(rss_params->rss_ind_table));
+ memcpy(p_rss_tlv->rss_key, rss_params->rss_key,
+ sizeof(rss_params->rss_key));
+ }
+
+ if (p_params->update_accept_any_vlan_flg) {
+ struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv;
+
+ size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv);
+ tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
+ p_any_vlan_tlv = qed_add_tlv(p_hwfn, &p_iov->offset, tlv, size);
+
+ resp_size += sizeof(struct pfvf_def_resp_tlv);
+ p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan;
+ p_any_vlan_tlv->update_accept_any_vlan_flg =
+ p_params->update_accept_any_vlan_flg;
+ }
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size);
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return -EINVAL;
+
+ qed_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params);
+
+ return rc;
+}
+
+int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp;
+ struct vfpf_first_tlv *req;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req));
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return -EAGAIN;
+
+ p_hwfn->b_int_enabled = 0;
+
+ return 0;
+}
+
+int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp;
+ struct vfpf_first_tlv *req;
+ u32 size;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req));
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+
+ if (!rc && resp->hdr.status != PFVF_STATUS_SUCCESS)
+ rc = -EAGAIN;
+
+ p_hwfn->b_int_enabled = 0;
+
+ if (p_iov->vf2pf_request)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(union vfpf_tlvs),
+ p_iov->vf2pf_request,
+ p_iov->vf2pf_request_phys);
+ if (p_iov->pf2vf_reply)
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ sizeof(union pfvf_tlvs),
+ p_iov->pf2vf_reply, p_iov->pf2vf_reply_phys);
+
+ if (p_iov->bulletin.p_virt) {
+ size = sizeof(struct qed_bulletin_content);
+ dma_free_coherent(&p_hwfn->cdev->pdev->dev,
+ size,
+ p_iov->bulletin.p_virt, p_iov->bulletin.phys);
+ }
+
+ kfree(p_hwfn->vf_iov_info);
+ p_hwfn->vf_iov_info = NULL;
+
+ return rc;
+}
+
+void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
+ struct qed_filter_mcast *p_filter_cmd)
+{
+ struct qed_sp_vport_update_params sp_params;
+ int i;
+
+ memset(&sp_params, 0, sizeof(sp_params));
+ sp_params.update_approx_mcast_flg = 1;
+
+ if (p_filter_cmd->opcode == QED_FILTER_ADD) {
+ for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
+ u32 bit;
+
+ bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
+ __set_bit(bit, sp_params.bins);
+ }
+ }
+
+ qed_vf_pf_vport_update(p_hwfn, &sp_params);
+}
+
+int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
+ struct qed_filter_ucast *p_ucast)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct vfpf_ucast_filter_tlv *req;
+ struct pfvf_def_resp_tlv *resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));
+ req->opcode = (u8) p_ucast->opcode;
+ req->type = (u8) p_ucast->type;
+ memcpy(req->mac, p_ucast->mac, ETH_ALEN);
+ req->vlan = p_ucast->vlan;
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ resp = &p_iov->pf2vf_reply->default_resp;
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return -EAGAIN;
+
+ return 0;
+}
+
+int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp;
+ int rc;
+
+ /* clear mailbox and prep first tlv */
+ qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP,
+ sizeof(struct vfpf_first_tlv));
+
+ /* add list termination tlv */
+ qed_add_tlv(p_hwfn, &p_iov->offset,
+ CHANNEL_TLV_LIST_END, sizeof(struct channel_list_end_tlv));
+
+ rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+ if (rc)
+ return rc;
+
+ if (resp->hdr.status != PFVF_STATUS_SUCCESS)
+ return -EINVAL;
+
+ return 0;
+}
+
+u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+
+ if (!p_iov) {
+ DP_NOTICE(p_hwfn, "vf_sriov_info isn't initialized\n");
+ return 0;
+ }
+
+ return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id;
+}
+
+int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change)
+{
+ struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
+ struct qed_bulletin_content shadow;
+ u32 crc, crc_size;
+
+ crc_size = sizeof(p_iov->bulletin.p_virt->crc);
+ *p_change = 0;
+
+ /* Need to guarantee PF is not in the middle of writing it */
+ memcpy(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size);
+
+ /* If version did not update, no need to do anything */
+ if (shadow.version == p_iov->bulletin_shadow.version)
+ return 0;
+
+ /* Verify the bulletin we see is valid */
+ crc = crc32(0, (u8 *)&shadow + crc_size,
+ p_iov->bulletin.size - crc_size);
+ if (crc != shadow.crc)
+ return -EAGAIN;
+
+ /* Set the shadow bulletin and process it */
+ memcpy(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size);
+
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "Read a bulletin update %08x\n", shadow.version);
+
+ *p_change = 1;
+
+ return 0;
+}
+
+void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_params *p_params,
+ struct qed_bulletin_content *p_bulletin)
+{
+ memset(p_params, 0, sizeof(*p_params));
+
+ p_params->speed.autoneg = p_bulletin->req_autoneg;
+ p_params->speed.advertised_speeds = p_bulletin->req_adv_speed;
+ p_params->speed.forced_speed = p_bulletin->req_forced_speed;
+ p_params->pause.autoneg = p_bulletin->req_autoneg_pause;
+ p_params->pause.forced_rx = p_bulletin->req_forced_rx;
+ p_params->pause.forced_tx = p_bulletin->req_forced_tx;
+ p_params->loopback_mode = p_bulletin->req_loopback;
+}
+
+void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_params *params)
+{
+ __qed_vf_get_link_params(p_hwfn, params,
+ &(p_hwfn->vf_iov_info->bulletin_shadow));
+}
+
+void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_state *p_link,
+ struct qed_bulletin_content *p_bulletin)
+{
+ memset(p_link, 0, sizeof(*p_link));
+
+ p_link->link_up = p_bulletin->link_up;
+ p_link->speed = p_bulletin->speed;
+ p_link->full_duplex = p_bulletin->full_duplex;
+ p_link->an = p_bulletin->autoneg;
+ p_link->an_complete = p_bulletin->autoneg_complete;
+ p_link->parallel_detection = p_bulletin->parallel_detection;
+ p_link->pfc_enabled = p_bulletin->pfc_enabled;
+ p_link->partner_adv_speed = p_bulletin->partner_adv_speed;
+ p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en;
+ p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en;
+ p_link->partner_adv_pause = p_bulletin->partner_adv_pause;
+ p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault;
+}
+
+void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_state *link)
+{
+ __qed_vf_get_link_state(p_hwfn, link,
+ &(p_hwfn->vf_iov_info->bulletin_shadow));
+}
+
+void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_capabilities *p_link_caps,
+ struct qed_bulletin_content *p_bulletin)
+{
+ memset(p_link_caps, 0, sizeof(*p_link_caps));
+ p_link_caps->speed_capabilities = p_bulletin->capability_speed;
+}
+
+void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_capabilities *p_link_caps)
+{
+ __qed_vf_get_link_caps(p_hwfn, p_link_caps,
+ &(p_hwfn->vf_iov_info->bulletin_shadow));
+}
+
+void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
+{
+ *num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs;
+}
+
+void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
+{
+ memcpy(port_mac,
+ p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac, ETH_ALEN);
+}
+
+void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters)
+{
+ struct qed_vf_iov *p_vf;
+
+ p_vf = p_hwfn->vf_iov_info;
+ *num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters;
+}
+
+bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
+{
+ struct qed_bulletin_content *bulletin;
+
+ bulletin = &p_hwfn->vf_iov_info->bulletin_shadow;
+ if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)))
+ return true;
+
+ /* Forbid VF from changing a MAC enforced by the PF; re-setting the
+ * forced address itself is allowed.
+ */
+ return ether_addr_equal(bulletin->mac, mac);
+}
+
+bool qed_vf_bulletin_get_forced_mac(struct qed_hwfn *hwfn,
+ u8 *dst_mac, u8 *p_is_forced)
+{
+ struct qed_bulletin_content *bulletin;
+
+ bulletin = &hwfn->vf_iov_info->bulletin_shadow;
+
+ if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
+ if (p_is_forced)
+ *p_is_forced = 1;
+ } else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) {
+ if (p_is_forced)
+ *p_is_forced = 0;
+ } else {
+ return false;
+ }
+
+ ether_addr_copy(dst_mac, bulletin->mac);
+
+ return true;
+}
+
+void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
+ u16 *fw_major, u16 *fw_minor,
+ u16 *fw_rev, u16 *fw_eng)
+{
+ struct pf_vf_pfdev_info *info;
+
+ info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info;
+
+ *fw_major = info->fw_major;
+ *fw_minor = info->fw_minor;
+ *fw_rev = info->fw_rev;
+ *fw_eng = info->fw_eng;
+}
+
+static void qed_handle_bulletin_change(struct qed_hwfn *hwfn)
+{
+ struct qed_eth_cb_ops *ops = hwfn->cdev->protocol_ops.eth;
+ u8 mac[ETH_ALEN], is_mac_exist, is_mac_forced;
+ void *cookie = hwfn->cdev->ops_cookie;
+
+ is_mac_exist = qed_vf_bulletin_get_forced_mac(hwfn, mac,
+ &is_mac_forced);
+ if (is_mac_exist && is_mac_forced && cookie)
+ ops->force_mac(cookie, mac);
+
+ /* Always update link configuration according to bulletin */
+ qed_link_update(hwfn);
+}
+
+void qed_iov_vf_task(struct work_struct *work)
+{
+ struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
+ iov_task.work);
+ u8 change = 0;
+
+ if (test_and_clear_bit(QED_IOV_WQ_STOP_WQ_FLAG, &hwfn->iov_task_flags))
+ return;
+
+ /* Handle bulletin board changes */
+ qed_vf_read_bulletin(hwfn, &change);
+ if (change)
+ qed_handle_bulletin_change(hwfn);
+
+ /* As VF is polling bulletin board, need to constantly re-schedule */
+ queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, HZ);
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
new file mode 100644
index 000000000000..b23ce58e932f
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
@@ -0,0 +1,999 @@
+/* QLogic qed NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#ifndef _QED_VF_H
+#define _QED_VF_H
+
+#include "qed_l2.h"
+#include "qed_mcp.h"
+
+#define T_ETH_INDIRECTION_TABLE_SIZE 128
+#define T_ETH_RSS_KEY_SIZE 10
+
+struct vf_pf_resc_request {
+ u8 num_rxqs;
+ u8 num_txqs;
+ u8 num_sbs;
+ u8 num_mac_filters;
+ u8 num_vlan_filters;
+ u8 num_mc_filters;
+ u16 padding;
+};
+
+struct hw_sb_info {
+ u16 hw_sb_id;
+ u8 sb_qid;
+ u8 padding[5];
+};
+
+#define TLV_BUFFER_SIZE 1024
+
+enum {
+ PFVF_STATUS_WAITING,
+ PFVF_STATUS_SUCCESS,
+ PFVF_STATUS_FAILURE,
+ PFVF_STATUS_NOT_SUPPORTED,
+ PFVF_STATUS_NO_RESOURCE,
+ PFVF_STATUS_FORCED,
+};
+
+/* vf pf channel tlvs */
+/* general tlv header (used for both vf->pf request and pf->vf response) */
+struct channel_tlv {
+ u16 type;
+ u16 length;
+};
+
+/* header of first vf->pf tlv carries the offset used to calculate response
+ * buffer address
+ */
+struct vfpf_first_tlv {
+ struct channel_tlv tl;
+ u32 padding;
+ u64 reply_address;
+};
+
+/* header of pf->vf tlvs, carries the status of handling the request */
+struct pfvf_tlv {
+ struct channel_tlv tl;
+ u8 status;
+ u8 padding[3];
+};
+
+/* response tlv used for most tlvs */
+struct pfvf_def_resp_tlv {
+ struct pfvf_tlv hdr;
+};
+
+/* used to terminate and pad a tlv list */
+struct channel_list_end_tlv {
+ struct channel_tlv tl;
+ u8 padding[4];
+};
+
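+/* Illustration (not part of the ABI definition): a complete vf->pf message
+ * in the request mailbox is laid out as
+ *   [vfpf_first_tlv][extended tlv]...[channel_list_end_tlv]
+ * where each tlv starts with a channel_tlv header carrying its type and
+ * length; see qed_vf_pf_vport_update() for a multi-tlv request.
+ */
+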
+#define VFPF_ACQUIRE_OS_LINUX (0)
+#define VFPF_ACQUIRE_OS_WINDOWS (1)
+#define VFPF_ACQUIRE_OS_ESX (2)
+#define VFPF_ACQUIRE_OS_SOLARIS (3)
+#define VFPF_ACQUIRE_OS_LINUX_USERSPACE (4)
+
+struct vfpf_acquire_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ struct vf_pf_vfdev_info {
+#define VFPF_ACQUIRE_CAP_OBSOLETE (1 << 0)
+#define VFPF_ACQUIRE_CAP_100G (1 << 1) /* VF can support 100g */
+ u64 capabilities;
+ u8 fw_major;
+ u8 fw_minor;
+ u8 fw_revision;
+ u8 fw_engineering;
+ u32 driver_version;
+ u16 opaque_fid; /* ME register value */
+ u8 os_type; /* VFPF_ACQUIRE_OS_* value */
+ u8 eth_fp_hsi_major;
+ u8 eth_fp_hsi_minor;
+ u8 padding[3];
+ } vfdev_info;
+
+ struct vf_pf_resc_request resc_request;
+
+ u64 bulletin_addr;
+ u32 bulletin_size;
+ u32 padding;
+};
+
+/* receive side scaling tlv */
+struct vfpf_vport_update_rss_tlv {
+ struct channel_tlv tl;
+
+ u8 update_rss_flags;
+#define VFPF_UPDATE_RSS_CONFIG_FLAG BIT(0)
+#define VFPF_UPDATE_RSS_CAPS_FLAG BIT(1)
+#define VFPF_UPDATE_RSS_IND_TABLE_FLAG BIT(2)
+#define VFPF_UPDATE_RSS_KEY_FLAG BIT(3)
+
+ u8 rss_enable;
+ u8 rss_caps;
+ u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */
+ u16 rss_ind_table[T_ETH_INDIRECTION_TABLE_SIZE];
+ u32 rss_key[T_ETH_RSS_KEY_SIZE];
+};
+
+struct pfvf_storm_stats {
+ u32 address;
+ u32 len;
+};
+
+struct pfvf_stats_info {
+ struct pfvf_storm_stats mstats;
+ struct pfvf_storm_stats pstats;
+ struct pfvf_storm_stats tstats;
+ struct pfvf_storm_stats ustats;
+};
+
+struct pfvf_acquire_resp_tlv {
+ struct pfvf_tlv hdr;
+
+ struct pf_vf_pfdev_info {
+ u32 chip_num;
+ u32 mfw_ver;
+
+ u16 fw_major;
+ u16 fw_minor;
+ u16 fw_rev;
+ u16 fw_eng;
+
+ u64 capabilities;
+#define PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED BIT(0)
+#define PFVF_ACQUIRE_CAP_100G BIT(1) /* If set, 100g PF */
+/* There are old PF versions where the PF might mistakenly override the sanity
+ * mechanism [version-based] and allow a VF that can't be supported to pass
+ * the acquisition phase.
+ * To overcome this, PFs now indicate that they're past that point, and
+ * new VFs fail to probe against older PFs that don't.
+ */
+#define PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE BIT(2)
+
+ u16 db_size;
+ u8 indices_per_sb;
+ u8 os_type;
+
+ /* These should match the PF's qed_dev values */
+ u16 chip_rev;
+ u8 dev_type;
+
+ u8 padding;
+
+ struct pfvf_stats_info stats_info;
+
+ u8 port_mac[ETH_ALEN];
+
+ /* It's possible the PF had to configure an older fastpath HSI
+ * [in case the VF is newer than the PF]. This is communicated back
+ * to the VF. In case of an error due to non-matching versions, it
+ * also gives the VF insight into the reason for the failure.
+ */
+ u8 major_fp_hsi;
+ u8 minor_fp_hsi;
+ } pfdev_info;
+
+ struct pf_vf_resc {
+#define PFVF_MAX_QUEUES_PER_VF 16
+#define PFVF_MAX_SBS_PER_VF 16
+ struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF];
+ u8 hw_qid[PFVF_MAX_QUEUES_PER_VF];
+ u8 cid[PFVF_MAX_QUEUES_PER_VF];
+
+ u8 num_rxqs;
+ u8 num_txqs;
+ u8 num_sbs;
+ u8 num_mac_filters;
+ u8 num_vlan_filters;
+ u8 num_mc_filters;
+ u8 padding[2];
+ } resc;
+
+ u32 bulletin_size;
+ u32 padding;
+};
+
+struct pfvf_start_queue_resp_tlv {
+ struct pfvf_tlv hdr;
+ u32 offset; /* offset to consumer/producer of queue */
+ u8 padding[4];
+};
+
+/* Setup Queue */
+struct vfpf_start_rxq_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ /* physical addresses */
+ u64 rxq_addr;
+ u64 deprecated_sge_addr;
+ u64 cqe_pbl_addr;
+
+ u16 cqe_pbl_size;
+ u16 hw_sb;
+ u16 rx_qid;
+ u16 hc_rate; /* desired interrupts per sec. */
+
+ u16 bd_max_bytes;
+ u16 stat_id;
+ u8 sb_index;
+ u8 padding[3];
+};
+
+struct vfpf_start_txq_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ /* physical addresses */
+ u64 pbl_addr;
+ u16 pbl_size;
+ u16 stat_id;
+ u16 tx_qid;
+ u16 hw_sb;
+
+ u32 flags; /* VFPF_QUEUE_FLG_X flags */
+ u16 hc_rate; /* desired interrupts per sec. */
+ u8 sb_index;
+ u8 padding[3];
+};
+
+/* Stop RX Queue */
+struct vfpf_stop_rxqs_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u16 rx_qid;
+ u8 num_rxqs;
+ u8 cqe_completion;
+ u8 padding[4];
+};
+
+/* Stop TX Queues */
+struct vfpf_stop_txqs_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u16 tx_qid;
+ u8 num_txqs;
+ u8 padding[5];
+};
+
+struct vfpf_update_rxq_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u64 deprecated_sge_addr[PFVF_MAX_QUEUES_PER_VF];
+
+ u16 rx_qid;
+ u8 num_rxqs;
+ u8 flags;
+#define VFPF_RXQ_UPD_INIT_SGE_DEPRECATE_FLAG BIT(0)
+#define VFPF_RXQ_UPD_COMPLETE_CQE_FLAG BIT(1)
+#define VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG BIT(2)
+
+ u8 padding[4];
+};
+
+/* Set Queue Filters */
+struct vfpf_q_mac_vlan_filter {
+ u32 flags;
+#define VFPF_Q_FILTER_DEST_MAC_VALID 0x01
+#define VFPF_Q_FILTER_VLAN_TAG_VALID 0x02
+#define VFPF_Q_FILTER_SET_MAC 0x100 /* set/clear */
+
+ u8 mac[ETH_ALEN];
+ u16 vlan_tag;
+
+ u8 padding[4];
+};
+
+/* Start a vport */
+struct vfpf_vport_start_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u64 sb_addr[PFVF_MAX_SBS_PER_VF];
+
+ u32 tpa_mode;
+ u16 dep1;
+ u16 mtu;
+
+ u8 vport_id;
+ u8 inner_vlan_removal;
+
+ u8 only_untagged;
+ u8 max_buffers_per_cqe;
+
+ u8 padding[4];
+};
+
+/* Extended vport-update tlvs - activate, rss, mcast, accept mode, etc. */
+struct vfpf_vport_update_activate_tlv {
+ struct channel_tlv tl;
+ u8 update_rx;
+ u8 update_tx;
+ u8 active_rx;
+ u8 active_tx;
+};
+
+struct vfpf_vport_update_tx_switch_tlv {
+ struct channel_tlv tl;
+ u8 tx_switching;
+ u8 padding[3];
+};
+
+struct vfpf_vport_update_vlan_strip_tlv {
+ struct channel_tlv tl;
+ u8 remove_vlan;
+ u8 padding[3];
+};
+
+struct vfpf_vport_update_mcast_bin_tlv {
+ struct channel_tlv tl;
+ u8 padding[4];
+
+ u64 bins[8];
+};
+
+struct vfpf_vport_update_accept_param_tlv {
+ struct channel_tlv tl;
+ u8 update_rx_mode;
+ u8 update_tx_mode;
+ u8 rx_accept_filter;
+ u8 tx_accept_filter;
+};
+
+struct vfpf_vport_update_accept_any_vlan_tlv {
+ struct channel_tlv tl;
+ u8 update_accept_any_vlan_flg;
+ u8 accept_any_vlan;
+
+ u8 padding[2];
+};
+
+struct vfpf_vport_update_sge_tpa_tlv {
+ struct channel_tlv tl;
+
+ u16 sge_tpa_flags;
+#define VFPF_TPA_IPV4_EN_FLAG BIT(0)
+#define VFPF_TPA_IPV6_EN_FLAG BIT(1)
+#define VFPF_TPA_PKT_SPLIT_FLAG BIT(2)
+#define VFPF_TPA_HDR_DATA_SPLIT_FLAG BIT(3)
+#define VFPF_TPA_GRO_CONSIST_FLAG BIT(4)
+
+ u8 update_sge_tpa_flags;
+#define VFPF_UPDATE_SGE_DEPRECATED_FLAG BIT(0)
+#define VFPF_UPDATE_TPA_EN_FLAG BIT(1)
+#define VFPF_UPDATE_TPA_PARAM_FLAG BIT(2)
+
+ u8 max_buffers_per_cqe;
+
+ u16 deprecated_sge_buff_size;
+ u16 tpa_max_size;
+ u16 tpa_min_size_to_start;
+ u16 tpa_min_size_to_cont;
+
+ u8 tpa_max_aggs_num;
+ u8 padding[7];
+};
+
+/* Primary tlv as a header for various extended tlvs for
+ * various functionalities in vport update ramrod.
+ */
+struct vfpf_vport_update_tlv {
+ struct vfpf_first_tlv first_tlv;
+};
+
+struct vfpf_ucast_filter_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ u8 opcode;
+ u8 type;
+
+ u8 mac[ETH_ALEN];
+
+ u16 vlan;
+ u16 padding[3];
+};
+
+struct tlv_buffer_size {
+ u8 tlv_buffer[TLV_BUFFER_SIZE];
+};
+
+union vfpf_tlvs {
+ struct vfpf_first_tlv first_tlv;
+ struct vfpf_acquire_tlv acquire;
+ struct vfpf_start_rxq_tlv start_rxq;
+ struct vfpf_start_txq_tlv start_txq;
+ struct vfpf_stop_rxqs_tlv stop_rxqs;
+ struct vfpf_stop_txqs_tlv stop_txqs;
+ struct vfpf_update_rxq_tlv update_rxq;
+ struct vfpf_vport_start_tlv start_vport;
+ struct vfpf_vport_update_tlv vport_update;
+ struct vfpf_ucast_filter_tlv ucast_filter;
+ struct channel_list_end_tlv list_end;
+ struct tlv_buffer_size tlv_buf_size;
+};
+
+union pfvf_tlvs {
+ struct pfvf_def_resp_tlv default_resp;
+ struct pfvf_acquire_resp_tlv acquire_resp;
+ struct tlv_buffer_size tlv_buf_size;
+ struct pfvf_start_queue_resp_tlv queue_start;
+};
+
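+/* The tlv_buf_size member pads both unions to TLV_BUFFER_SIZE, the size of
+ * the DMA-coherent request/reply mailboxes allocated for the VF in
+ * qed_vf_hw_prepare().
+ */
+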
+enum qed_bulletin_bit {
+ /* Alert the VF that a forced MAC was set by the PF */
+ MAC_ADDR_FORCED = 0,
+ /* Alert the VF that a forced VLAN was set by the PF */
+ VLAN_ADDR_FORCED = 2,
+
+ /* Indicate that `default_only_untagged' contains actual data */
+ VFPF_BULLETIN_UNTAGGED_DEFAULT = 3,
+ VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED = 4,
+
+ /* Alert the VF that a suggested MAC was sent by the PF.
+ * MAC_ADDR will be disabled in case MAC_ADDR_FORCED is set.
+ */
+ VFPF_BULLETIN_MAC_ADDR = 5
+};
+
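+/* Consumers test these bits against qed_bulletin_content.valid_bitmap,
+ * e.g. (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) as done in
+ * qed_vf_check_mac() and qed_vf_bulletin_get_forced_mac().
+ */
+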
+struct qed_bulletin_content {
+ /* crc of structure to ensure it is not read mid-update */
+ u32 crc;
+
+ u32 version;
+
+ /* bitmap indicating which fields hold valid values */
+ u64 valid_bitmap;
+
+ /* used for MAC_ADDR or MAC_ADDR_FORCED */
+ u8 mac[ETH_ALEN];
+
+ /* If valid, 1 => only untagged Rx if no vlan is configured */
+ u8 default_only_untagged;
+ u8 padding;
+
+ /* The following is a 'copy' of qed_mcp_link_state,
+ * qed_mcp_link_params and qed_mcp_link_capabilities. Since it's
+ * possible the structs will increase further along the road we cannot
+ * have it here; Instead we need to have all of its fields.
+ */
+ u8 req_autoneg;
+ u8 req_autoneg_pause;
+ u8 req_forced_rx;
+ u8 req_forced_tx;
+ u8 padding2[4];
+
+ u32 req_adv_speed;
+ u32 req_forced_speed;
+ u32 req_loopback;
+ u32 padding3;
+
+ u8 link_up;
+ u8 full_duplex;
+ u8 autoneg;
+ u8 autoneg_complete;
+ u8 parallel_detection;
+ u8 pfc_enabled;
+ u8 partner_tx_flow_ctrl_en;
+ u8 partner_rx_flow_ctrl_en;
+ u8 partner_adv_pause;
+ u8 sfp_tx_fault;
+ u8 padding4[6];
+
+ u32 speed;
+ u32 partner_adv_speed;
+
+ u32 capability_speed;
+
+ /* Forced vlan */
+ u16 pvid;
+ u16 padding5;
+};
+
+struct qed_bulletin {
+ dma_addr_t phys;
+ struct qed_bulletin_content *p_virt;
+ u32 size;
+};
+
+enum {
+ CHANNEL_TLV_NONE, /* ends tlv sequence */
+ CHANNEL_TLV_ACQUIRE,
+ CHANNEL_TLV_VPORT_START,
+ CHANNEL_TLV_VPORT_UPDATE,
+ CHANNEL_TLV_VPORT_TEARDOWN,
+ CHANNEL_TLV_START_RXQ,
+ CHANNEL_TLV_START_TXQ,
+ CHANNEL_TLV_STOP_RXQS,
+ CHANNEL_TLV_STOP_TXQS,
+ CHANNEL_TLV_UPDATE_RXQ,
+ CHANNEL_TLV_INT_CLEANUP,
+ CHANNEL_TLV_CLOSE,
+ CHANNEL_TLV_RELEASE,
+ CHANNEL_TLV_LIST_END,
+ CHANNEL_TLV_UCAST_FILTER,
+ CHANNEL_TLV_VPORT_UPDATE_ACTIVATE,
+ CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH,
+ CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP,
+ CHANNEL_TLV_VPORT_UPDATE_MCAST,
+ CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM,
+ CHANNEL_TLV_VPORT_UPDATE_RSS,
+ CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
+ CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
+ CHANNEL_TLV_MAX,
+
+ /* Required for iterating over vport-update tlvs.
+ * Will break if the vport-update tlvs are not sequential.
+ */
+ CHANNEL_TLV_VPORT_UPDATE_MAX = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA + 1,
+};
+
+/* This data is held in the qed_hwfn structure for VFs only. */
+struct qed_vf_iov {
+ union vfpf_tlvs *vf2pf_request;
+ dma_addr_t vf2pf_request_phys;
+ union pfvf_tlvs *pf2vf_reply;
+ dma_addr_t pf2vf_reply_phys;
+
+ /* Should be taken whenever the mailbox buffers are accessed */
+ struct mutex mutex;
+ u8 *offset;
+
+ /* Bulletin Board */
+ struct qed_bulletin bulletin;
+ struct qed_bulletin_content bulletin_shadow;
+
+ /* we set aside a copy of the acquire response */
+ struct pfvf_acquire_resp_tlv acquire_resp;
+};
+
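+/* An instance of this struct, together with its DMA-coherent mailboxes and
+ * bulletin buffer, is allocated by qed_vf_hw_prepare() and freed again by
+ * qed_vf_pf_release().
+ */
+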
+#ifdef CONFIG_QED_SRIOV
+/**
+ * @brief Read the VF bulletin and act on it if needed
+ *
+ * @param p_hwfn
+ * @param p_change - set by qed to 1 iff the bulletin board has changed, 0 otherwise.
+ *
+ * @return enum _qed_status
+ */
+int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change);
+
+/**
+ * @brief Get link parameters for VF from qed
+ *
+ * @param p_hwfn
+ * @param params - the link params structure to be filled for the VF
+ */
+void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_params *params);
+
+/**
+ * @brief Get link state for VF from qed
+ *
+ * @param p_hwfn
+ * @param link - the link state structure to be filled for the VF
+ */
+void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_state *link);
+
+/**
+ * @brief Get link capabilities for VF from qed
+ *
+ * @param p_hwfn
+ * @param p_link_caps - the link capabilities structure to be filled for the VF
+ */
+void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_capabilities *p_link_caps);
+
+/**
+ * @brief Get number of Rx queues allocated for VF by qed
+ *
+ * @param p_hwfn
+ * @param num_rxqs - allocated RX queues
+ */
+void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs);
+
+/**
+ * @brief Get port mac address for VF
+ *
+ * @param p_hwfn
+ * @param port_mac - destination location for port mac
+ */
+void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac);
+
+/**
+ * @brief Get number of VLAN filters allocated for VF by qed
+ *
+ * @param p_hwfn
+ * @param num_vlan_filters - allocated VLAN filters
+ */
+void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
+ u8 *num_vlan_filters);
+
+/**
+ * @brief Check if VF can set a MAC address
+ *
+ * @param p_hwfn
+ * @param mac
+ *
+ * @return bool
+ */
+bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac);
+
+/**
+ * @brief Set firmware version information in dev_info from the VF's acquire response tlv
+ *
+ * @param p_hwfn
+ * @param fw_major
+ * @param fw_minor
+ * @param fw_rev
+ * @param fw_eng
+ */
+void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
+ u16 *fw_major, u16 *fw_minor,
+ u16 *fw_rev, u16 *fw_eng);
+
+/**
+ * @brief hw preparation for VF - allocates the PF/VF mailboxes and
+ * sends the ACQUIRE message
+ *
+ * @param p_hwfn
+ *
+ * @return int
+ */
+int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief VF - start the RX Queue by sending a message to the PF
+ * @param p_hwfn
+ * @param rx_queue_id - zero based within the VF
+ * @param sb - VF status block for this queue
+ * @param sb_index - Index within the status block
+ * @param bd_max_bytes - maximum number of bytes per bd
+ * @param bd_chain_phys_addr - physical address of bd chain
+ * @param cqe_pbl_addr - physical address of pbl
+ * @param cqe_pbl_size - pbl size
+ * @param pp_prod - pointer to the producer to be
+ * used in fastpath
+ *
+ * @return int
+ */
+int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
+ u8 rx_queue_id,
+ u16 sb,
+ u8 sb_index,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_addr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size, void __iomem **pp_prod);
+
+/**
+ * @brief VF - start the TX queue by sending a message to the
+ * PF.
+ *
+ * @param p_hwfn
+ * @param tx_queue_id - zero based within the VF
+ * @param sb - status block for this queue
+ * @param sb_index - index within the status block
+ * @param pbl_addr - physical address of the tx pbl
+ * @param pbl_size - pbl size
+ * @param pp_doorbell - pointer to address to which to
+ * write the doorbell
+ *
+ * @return int
+ */
+int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
+ u16 tx_queue_id,
+ u16 sb,
+ u8 sb_index,
+ dma_addr_t pbl_addr,
+ u16 pbl_size, void __iomem **pp_doorbell);
+
+/**
+ * @brief VF - stop the RX queue by sending a message to the PF
+ *
+ * @param p_hwfn
+ * @param rx_qid
+ * @param cqe_completion
+ *
+ * @return int
+ */
+int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
+ u16 rx_qid, bool cqe_completion);
+
+/**
+ * @brief VF - stop the TX queue by sending a message to the PF
+ *
+ * @param p_hwfn
+ * @param tx_qid
+ *
+ * @return int
+ */
+int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid);
+
+/**
+ * @brief VF - send a vport update command
+ *
+ * @param p_hwfn
+ * @param params
+ *
+ * @return int
+ */
+int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_params);
+
+/**
+ *
+ * @brief VF - send a close message to PF
+ *
+ * @param p_hwfn
+ *
+ * @return enum _qed_status
+ */
+int qed_vf_pf_reset(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief VF - free the VF's memory
+ *
+ * @param p_hwfn
+ *
+ * @return enum _qed_status
+ */
+int qed_vf_pf_release(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief qed_vf_get_igu_sb_id - Get the IGU SB ID for a given
+ * sb_id. For VFs, IGU SBs don't have to be contiguous.
+ *
+ * @param p_hwfn
+ * @param sb_id
+ *
+ * @return u16
+ */
+u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
+
+/**
+ * @brief qed_vf_pf_vport_start - perform vport start for VF.
+ *
+ * @param p_hwfn
+ * @param vport_id
+ * @param mtu
+ * @param inner_vlan_removal
+ * @param tpa_mode
+ * @param max_buffers_per_cqe
+ * @param only_untagged - default behavior regarding vlan acceptance
+ *
+ * @return enum _qed_status
+ */
+int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
+ u8 vport_id,
+ u16 mtu,
+ u8 inner_vlan_removal,
+ enum qed_tpa_mode tpa_mode,
+ u8 max_buffers_per_cqe, u8 only_untagged);
+
+/**
+ * @brief qed_vf_pf_vport_stop - stop the VF's vport
+ *
+ * @param p_hwfn
+ *
+ * @return enum _qed_status
+ */
+int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn);
+
+int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
+ struct qed_filter_ucast *p_param);
+
+void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
+ struct qed_filter_mcast *p_filter_cmd);
+
+/**
+ * @brief qed_vf_pf_int_cleanup - clean the SB of the VF
+ *
+ * @param p_hwfn
+ *
+ * @return enum _qed_status
+ */
+int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn);
+
+/**
+ * @brief - return the link params in a given bulletin board
+ *
+ * @param p_hwfn
+ * @param p_params - pointer to a struct to fill with link params
+ * @param p_bulletin
+ */
+void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_params *p_params,
+ struct qed_bulletin_content *p_bulletin);
+
+/**
+ * @brief - return the link state in a given bulletin board
+ *
+ * @param p_hwfn
+ * @param p_link - pointer to a struct to fill with link state
+ * @param p_bulletin
+ */
+void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_state *p_link,
+ struct qed_bulletin_content *p_bulletin);
+
+/**
+ * @brief - return the link capabilities in a given bulletin board
+ *
+ * @param p_hwfn
+ * @param p_link - pointer to a struct to fill with link capabilities
+ * @param p_bulletin
+ */
+void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_capabilities *p_link_caps,
+ struct qed_bulletin_content *p_bulletin);
+
+void qed_iov_vf_task(struct work_struct *work);
+#else
+static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_params *params)
+{
+}
+
+static inline void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_state *link)
+{
+}
+
+static inline void
+qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_capabilities *p_link_caps)
+{
+}
+
+static inline void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs)
+{
+}
+
+static inline void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac)
+{
+}
+
+static inline void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
+ u8 *num_vlan_filters)
+{
+}
+
+static inline bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac)
+{
+ return false;
+}
+
+static inline void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
+ u16 *fw_major, u16 *fw_minor,
+ u16 *fw_rev, u16 *fw_eng)
+{
+}
+
+static inline int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
+ u8 rx_queue_id,
+ u16 sb,
+ u8 sb_index,
+ u16 bd_max_bytes,
+ dma_addr_t bd_chain_phys_adr,
+ dma_addr_t cqe_pbl_addr,
+ u16 cqe_pbl_size, void __iomem **pp_prod)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
+ u16 tx_queue_id,
+ u16 sb,
+ u8 sb_index,
+ dma_addr_t pbl_addr,
+ u16 pbl_size, void __iomem **pp_doorbell)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
+ u16 rx_qid, bool cqe_completion)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
+{
+ return -EINVAL;
+}
+
+static inline int
+qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
+ struct qed_sp_vport_update_params *p_params)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_reset(struct qed_hwfn *p_hwfn)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_release(struct qed_hwfn *p_hwfn)
+{
+ return -EINVAL;
+}
+
+static inline u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id)
+{
+ return 0;
+}
+
+static inline int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
+ u8 vport_id,
+ u16 mtu,
+ u8 inner_vlan_removal,
+ enum qed_tpa_mode tpa_mode,
+ u8 max_buffers_per_cqe,
+ u8 only_untagged)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn)
+{
+ return -EINVAL;
+}
+
+static inline int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
+ struct qed_filter_ucast *p_param)
+{
+ return -EINVAL;
+}
+
+static inline void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
+ struct qed_filter_mcast *p_filter_cmd)
+{
+}
+
+static inline int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn)
+{
+ return -EINVAL;
+}
+
+static inline void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_params
+ *p_params,
+ struct qed_bulletin_content
+ *p_bulletin)
+{
+}
+
+static inline void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_state *p_link,
+ struct qed_bulletin_content
+ *p_bulletin)
+{
+}
+
+static inline void
+__qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
+ struct qed_mcp_link_capabilities *p_link_caps,
+ struct qed_bulletin_content *p_bulletin)
+{
+}
+
+static inline void qed_iov_vf_task(struct work_struct *work)
+{
+}
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/qlogic/qede/Makefile b/drivers/net/ethernet/qlogic/qede/Makefile
index 06ff90d87572..74a49850d74d 100644
--- a/drivers/net/ethernet/qlogic/qede/Makefile
+++ b/drivers/net/ethernet/qlogic/qede/Makefile
@@ -1,3 +1,4 @@
obj-$(CONFIG_QEDE) := qede.o
qede-y := qede_main.o qede_ethtool.o
+qede-$(CONFIG_DCB) += qede_dcbnl.o
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index d023251544d9..02b06d4e40ae 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -24,16 +24,14 @@
#include <linux/qed/qed_eth_if.h>
#define QEDE_MAJOR_VERSION 8
-#define QEDE_MINOR_VERSION 7
-#define QEDE_REVISION_VERSION 0
-#define QEDE_ENGINEERING_VERSION 0
+#define QEDE_MINOR_VERSION 10
+#define QEDE_REVISION_VERSION 1
+#define QEDE_ENGINEERING_VERSION 20
#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \
__stringify(QEDE_MINOR_VERSION) "." \
__stringify(QEDE_REVISION_VERSION) "." \
__stringify(QEDE_ENGINEERING_VERSION)
-#define QEDE_ETH_INTERFACE_VERSION 300
-
#define DRV_MODULE_SYM qede
struct qede_stats {
@@ -61,16 +59,16 @@ struct qede_stats {
/* port */
u64 rx_64_byte_packets;
- u64 rx_127_byte_packets;
- u64 rx_255_byte_packets;
- u64 rx_511_byte_packets;
- u64 rx_1023_byte_packets;
- u64 rx_1518_byte_packets;
- u64 rx_1522_byte_packets;
- u64 rx_2047_byte_packets;
- u64 rx_4095_byte_packets;
- u64 rx_9216_byte_packets;
- u64 rx_16383_byte_packets;
+ u64 rx_65_to_127_byte_packets;
+ u64 rx_128_to_255_byte_packets;
+ u64 rx_256_to_511_byte_packets;
+ u64 rx_512_to_1023_byte_packets;
+ u64 rx_1024_to_1518_byte_packets;
+ u64 rx_1519_to_1522_byte_packets;
+ u64 rx_1519_to_2047_byte_packets;
+ u64 rx_2048_to_4095_byte_packets;
+ u64 rx_4096_to_9216_byte_packets;
+ u64 rx_9217_to_16383_byte_packets;
u64 rx_crc_errors;
u64 rx_mac_crtl_frames;
u64 rx_pause_frames;
@@ -114,6 +112,10 @@ struct qede_dev {
u32 dp_module;
u8 dp_level;
+ u32 flags;
+#define QEDE_FLAG_IS_VF BIT(0)
+#define IS_VF(edev) (!!((edev)->flags & QEDE_FLAG_IS_VF))
+
const struct qed_eth_ops *ops;
struct qed_dev_eth_info dev_info;
@@ -141,6 +143,8 @@ struct qede_dev {
struct mutex qede_lock;
u32 state; /* Protected by qede_lock */
u16 rx_buf_size;
+ u32 rx_copybreak;
+
/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
#define ETH_OVERHEAD (ETH_HLEN + 8 + 8)
/* Max supported alignment is 256 (8 shift)
@@ -156,6 +160,10 @@ struct qede_dev {
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
struct qede_stats stats;
+#define QEDE_RSS_INDIR_INITED BIT(0)
+#define QEDE_RSS_KEY_INITED BIT(1)
+#define QEDE_RSS_CAPS_INITED BIT(2)
+ u32 rss_params_inited; /* bit-field to track initialized rss params */
struct qed_update_vport_rss_params rss_params;
u16 q_num_rx_buffers; /* Must be a power of two */
u16 q_num_tx_buffers; /* Must be a power of two */
@@ -167,6 +175,8 @@ struct qede_dev {
bool accept_any_vlan;
struct delayed_work sp_task;
unsigned long sp_flags;
+ u16 vxlan_dst_port;
+ u16 geneve_dst_port;
};
enum QEDE_STATE {
@@ -227,6 +237,7 @@ struct qede_rx_queue {
u64 rx_hw_errors;
u64 rx_alloc_errors;
+ u64 rx_ip_frags;
};
union db_prod {
@@ -286,13 +297,19 @@ struct qede_fastpath {
#define QEDE_CSUM_ERROR BIT(0)
#define QEDE_CSUM_UNNECESSARY BIT(1)
+#define QEDE_TUNN_CSUM_UNNECESSARY BIT(2)
-#define QEDE_SP_RX_MODE 1
+#define QEDE_SP_RX_MODE 1
+#define QEDE_SP_VXLAN_PORT_CONFIG 2
+#define QEDE_SP_GENEVE_PORT_CONFIG 3
union qede_reload_args {
u16 mtu;
};
+#ifdef CONFIG_DCB
+void qede_set_dcbnl_ops(struct net_device *ndev);
+#endif
void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level);
void qede_set_ethtool_ops(struct net_device *netdev);
void qede_reload(struct qede_dev *edev,
@@ -301,6 +318,10 @@ void qede_reload(struct qede_dev *edev,
union qede_reload_args *args);
int qede_change_mtu(struct net_device *dev, int new_mtu);
void qede_fill_by_demand_stats(struct qede_dev *edev);
+bool qede_has_rx_work(struct qede_rx_queue *rxq);
+int qede_txq_has_work(struct qede_tx_queue *txq);
+void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev,
+ u8 count);
#define RX_RING_SIZE_POW 13
#define RX_RING_SIZE ((u16)BIT(RX_RING_SIZE_POW))
@@ -314,6 +335,7 @@ void qede_fill_by_demand_stats(struct qede_dev *edev);
#define NUM_TX_BDS_MIN 128
#define NUM_TX_BDS_DEF NUM_TX_BDS_MAX
+#define QEDE_MIN_PKT_LEN 64
#define QEDE_RX_HDR_SIZE 256
#define for_each_rss(i) for (i = 0; i < edev->num_rss; i++)
diff --git a/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
new file mode 100644
index 000000000000..03e8c0212433
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qede/qede_dcbnl.c
@@ -0,0 +1,348 @@
+/* QLogic qede NIC Driver
+ * Copyright (c) 2015 QLogic Corporation
+ *
+ * This software is available under the terms of the GNU General Public License
+ * (GPL) Version 2, available from the file COPYING in the main directory of
+ * this source tree.
+ */
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <net/dcbnl.h>
+#include "qede.h"
+
+static u8 qede_dcbnl_getstate(struct net_device *netdev)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getstate(edev->cdev);
+}
+
+static u8 qede_dcbnl_setstate(struct net_device *netdev, u8 state)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setstate(edev->cdev, state);
+}
+
+static void qede_dcbnl_getpermhwaddr(struct net_device *netdev,
+ u8 *perm_addr)
+{
+ memcpy(perm_addr, netdev->dev_addr, netdev->addr_len);
+}
+
+static void qede_dcbnl_getpgtccfgtx(struct net_device *netdev, int prio,
+ u8 *prio_type, u8 *pgid, u8 *bw_pct,
+ u8 *up_map)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->getpgtccfgtx(edev->cdev, prio, prio_type,
+ pgid, bw_pct, up_map);
+}
+
+static void qede_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
+ int pgid, u8 *bw_pct)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->getpgbwgcfgtx(edev->cdev, pgid, bw_pct);
+}
+
+static void qede_dcbnl_getpgtccfgrx(struct net_device *netdev, int prio,
+ u8 *prio_type, u8 *pgid, u8 *bw_pct,
+ u8 *up_map)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->getpgtccfgrx(edev->cdev, prio, prio_type, pgid, bw_pct,
+ up_map);
+}
+
+static void qede_dcbnl_getpgbwgcfgrx(struct net_device *netdev,
+ int pgid, u8 *bw_pct)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->getpgbwgcfgrx(edev->cdev, pgid, bw_pct);
+}
+
+static void qede_dcbnl_getpfccfg(struct net_device *netdev, int prio,
+ u8 *setting)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->getpfccfg(edev->cdev, prio, setting);
+}
+
+static void qede_dcbnl_setpfccfg(struct net_device *netdev, int prio,
+ u8 setting)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->setpfccfg(edev->cdev, prio, setting);
+}
+
+static u8 qede_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getcap(edev->cdev, capid, cap);
+}
+
+static int qede_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getnumtcs(edev->cdev, tcid, num);
+}
+
+static u8 qede_dcbnl_getpfcstate(struct net_device *netdev)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getpfcstate(edev->cdev);
+}
+
+static int qede_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getapp(edev->cdev, idtype, id);
+}
+
+static u8 qede_dcbnl_getdcbx(struct net_device *netdev)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getdcbx(edev->cdev);
+}
+
+static void qede_dcbnl_setpgtccfgtx(struct net_device *netdev, int prio,
+ u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->setpgtccfgtx(edev->cdev, prio, pri_type, pgid,
+ bw_pct, up_map);
+}
+
+static void qede_dcbnl_setpgtccfgrx(struct net_device *netdev, int prio,
+ u8 pri_type, u8 pgid, u8 bw_pct, u8 up_map)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->setpgtccfgrx(edev->cdev, prio, pri_type, pgid,
+ bw_pct, up_map);
+}
+
+static void qede_dcbnl_setpgbwgcfgtx(struct net_device *netdev, int pgid,
+ u8 bw_pct)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->setpgbwgcfgtx(edev->cdev, pgid, bw_pct);
+}
+
+static void qede_dcbnl_setpgbwgcfgrx(struct net_device *netdev, int pgid,
+ u8 bw_pct)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->setpgbwgcfgrx(edev->cdev, pgid, bw_pct);
+}
+
+static u8 qede_dcbnl_setall(struct net_device *netdev)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setall(edev->cdev);
+}
+
+static int qede_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setnumtcs(edev->cdev, tcid, num);
+}
+
+static void qede_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ edev->ops->dcb->setpfcstate(edev->cdev, state);
+}
+
+static int qede_dcbnl_setapp(struct net_device *netdev, u8 idtype, u16 idval,
+ u8 up)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setapp(edev->cdev, idtype, idval, up);
+}
+
+static u8 qede_dcbnl_setdcbx(struct net_device *netdev, u8 state)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setdcbx(edev->cdev, state);
+}
+
+static u8 qede_dcbnl_getfeatcfg(struct net_device *netdev, int featid,
+ u8 *flags)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->getfeatcfg(edev->cdev, featid, flags);
+}
+
+static u8 qede_dcbnl_setfeatcfg(struct net_device *netdev, int featid, u8 flags)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->setfeatcfg(edev->cdev, featid, flags);
+}
+
+static int qede_dcbnl_peer_getappinfo(struct net_device *netdev,
+ struct dcb_peer_app_info *info,
+ u16 *count)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->peer_getappinfo(edev->cdev, info, count);
+}
+
+static int qede_dcbnl_peer_getapptable(struct net_device *netdev,
+ struct dcb_app *app)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->peer_getapptable(edev->cdev, app);
+}
+
+static int qede_dcbnl_cee_peer_getpfc(struct net_device *netdev,
+ struct cee_pfc *pfc)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->cee_peer_getpfc(edev->cdev, pfc);
+}
+
+static int qede_dcbnl_cee_peer_getpg(struct net_device *netdev,
+ struct cee_pg *pg)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->cee_peer_getpg(edev->cdev, pg);
+}
+
+static int qede_dcbnl_ieee_getpfc(struct net_device *netdev,
+ struct ieee_pfc *pfc)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_getpfc(edev->cdev, pfc);
+}
+
+static int qede_dcbnl_ieee_setpfc(struct net_device *netdev,
+ struct ieee_pfc *pfc)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_setpfc(edev->cdev, pfc);
+}
+
+static int qede_dcbnl_ieee_getets(struct net_device *netdev,
+ struct ieee_ets *ets)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_getets(edev->cdev, ets);
+}
+
+static int qede_dcbnl_ieee_setets(struct net_device *netdev,
+ struct ieee_ets *ets)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_setets(edev->cdev, ets);
+}
+
+static int qede_dcbnl_ieee_getapp(struct net_device *netdev,
+ struct dcb_app *app)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_getapp(edev->cdev, app);
+}
+
+static int qede_dcbnl_ieee_setapp(struct net_device *netdev,
+ struct dcb_app *app)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_setapp(edev->cdev, app);
+}
+
+static int qede_dcbnl_ieee_peer_getpfc(struct net_device *netdev,
+ struct ieee_pfc *pfc)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_peer_getpfc(edev->cdev, pfc);
+}
+
+static int qede_dcbnl_ieee_peer_getets(struct net_device *netdev,
+ struct ieee_ets *ets)
+{
+ struct qede_dev *edev = netdev_priv(netdev);
+
+ return edev->ops->dcb->ieee_peer_getets(edev->cdev, ets);
+}
+
+static const struct dcbnl_rtnl_ops qede_dcbnl_ops = {
+ .ieee_getpfc = qede_dcbnl_ieee_getpfc,
+ .ieee_setpfc = qede_dcbnl_ieee_setpfc,
+ .ieee_getets = qede_dcbnl_ieee_getets,
+ .ieee_setets = qede_dcbnl_ieee_setets,
+ .ieee_getapp = qede_dcbnl_ieee_getapp,
+ .ieee_setapp = qede_dcbnl_ieee_setapp,
+ .getdcbx = qede_dcbnl_getdcbx,
+ .ieee_peer_getpfc = qede_dcbnl_ieee_peer_getpfc,
+ .ieee_peer_getets = qede_dcbnl_ieee_peer_getets,
+ .getstate = qede_dcbnl_getstate,
+ .setstate = qede_dcbnl_setstate,
+ .getpermhwaddr = qede_dcbnl_getpermhwaddr,
+ .getpgtccfgtx = qede_dcbnl_getpgtccfgtx,
+ .getpgbwgcfgtx = qede_dcbnl_getpgbwgcfgtx,
+ .getpgtccfgrx = qede_dcbnl_getpgtccfgrx,
+ .getpgbwgcfgrx = qede_dcbnl_getpgbwgcfgrx,
+ .getpfccfg = qede_dcbnl_getpfccfg,
+ .setpfccfg = qede_dcbnl_setpfccfg,
+ .getcap = qede_dcbnl_getcap,
+ .getnumtcs = qede_dcbnl_getnumtcs,
+ .getpfcstate = qede_dcbnl_getpfcstate,
+ .getapp = qede_dcbnl_getapp,
+ .setpgtccfgtx = qede_dcbnl_setpgtccfgtx,
+ .setpgtccfgrx = qede_dcbnl_setpgtccfgrx,
+ .setpgbwgcfgtx = qede_dcbnl_setpgbwgcfgtx,
+ .setpgbwgcfgrx = qede_dcbnl_setpgbwgcfgrx,
+ .setall = qede_dcbnl_setall,
+ .setnumtcs = qede_dcbnl_setnumtcs,
+ .setpfcstate = qede_dcbnl_setpfcstate,
+ .setapp = qede_dcbnl_setapp,
+ .setdcbx = qede_dcbnl_setdcbx,
+ .setfeatcfg = qede_dcbnl_setfeatcfg,
+ .getfeatcfg = qede_dcbnl_getfeatcfg,
+ .peer_getappinfo = qede_dcbnl_peer_getappinfo,
+ .peer_getapptable = qede_dcbnl_peer_getapptable,
+ .cee_peer_getpfc = qede_dcbnl_cee_peer_getpfc,
+ .cee_peer_getpg = qede_dcbnl_cee_peer_getpg,
+};
+
+void qede_set_dcbnl_ops(struct net_device *dev)
+{
+ dev->dcbnl_ops = &qede_dcbnl_ops;
+}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
index c49dc10ce151..f8492cac9290 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c
@@ -9,6 +9,7 @@
#include <linux/version.h>
#include <linux/types.h>
#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/string.h>
#include <linux/pci.h>
@@ -27,12 +28,16 @@
#define QEDE_RQSTAT_STRING(stat_name) (#stat_name)
#define QEDE_RQSTAT(stat_name) \
{QEDE_RQSTAT_OFFSET(stat_name), QEDE_RQSTAT_STRING(stat_name)}
+
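+/* Upper bound on the number of 100-200us polling iterations while waiting
+ * for selftest TX completion / RX traffic.
+ */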
+#define QEDE_SELFTEST_POLL_COUNT 100
+
static const struct {
u64 offset;
char string[ETH_GSTRING_LEN];
} qede_rqstats_arr[] = {
QEDE_RQSTAT(rx_hw_errors),
QEDE_RQSTAT(rx_alloc_errors),
+ QEDE_RQSTAT(rx_ip_frags),
};
#define QEDE_NUM_RQSTATS ARRAY_SIZE(qede_rqstats_arr)
@@ -59,16 +64,16 @@ static const struct {
QEDE_STAT(tx_bcast_pkts),
QEDE_PF_STAT(rx_64_byte_packets),
- QEDE_PF_STAT(rx_127_byte_packets),
- QEDE_PF_STAT(rx_255_byte_packets),
- QEDE_PF_STAT(rx_511_byte_packets),
- QEDE_PF_STAT(rx_1023_byte_packets),
- QEDE_PF_STAT(rx_1518_byte_packets),
- QEDE_PF_STAT(rx_1522_byte_packets),
- QEDE_PF_STAT(rx_2047_byte_packets),
- QEDE_PF_STAT(rx_4095_byte_packets),
- QEDE_PF_STAT(rx_9216_byte_packets),
- QEDE_PF_STAT(rx_16383_byte_packets),
+ QEDE_PF_STAT(rx_65_to_127_byte_packets),
+ QEDE_PF_STAT(rx_128_to_255_byte_packets),
+ QEDE_PF_STAT(rx_256_to_511_byte_packets),
+ QEDE_PF_STAT(rx_512_to_1023_byte_packets),
+ QEDE_PF_STAT(rx_1024_to_1518_byte_packets),
+ QEDE_PF_STAT(rx_1519_to_1522_byte_packets),
+ QEDE_PF_STAT(rx_1519_to_2047_byte_packets),
+ QEDE_PF_STAT(rx_2048_to_4095_byte_packets),
+ QEDE_PF_STAT(rx_4096_to_9216_byte_packets),
+ QEDE_PF_STAT(rx_9217_to_16383_byte_packets),
QEDE_PF_STAT(tx_64_byte_packets),
QEDE_PF_STAT(tx_65_to_127_byte_packets),
QEDE_PF_STAT(tx_128_to_255_byte_packets),
@@ -116,11 +121,39 @@ static const struct {
#define QEDE_NUM_STATS ARRAY_SIZE(qede_stats_arr)
+enum {
+ QEDE_PRI_FLAG_CMT,
+ QEDE_PRI_FLAG_LEN,
+};
+
+static const char qede_private_arr[QEDE_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
+ "Coupled-Function",
+};
+
+enum qede_ethtool_tests {
+ QEDE_ETHTOOL_INT_LOOPBACK,
+ QEDE_ETHTOOL_INTERRUPT_TEST,
+ QEDE_ETHTOOL_MEMORY_TEST,
+ QEDE_ETHTOOL_REGISTER_TEST,
+ QEDE_ETHTOOL_CLOCK_TEST,
+ QEDE_ETHTOOL_TEST_MAX
+};
+
+static const char qede_tests_str_arr[QEDE_ETHTOOL_TEST_MAX][ETH_GSTRING_LEN] = {
+ "Internal loopback (offline)",
+ "Interrupt (online)\t",
+ "Memory (online)\t\t",
+ "Register (online)\t",
+ "Clock (online)\t\t",
+};
+
static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
{
int i, j, k;
for (i = 0, j = 0; i < QEDE_NUM_STATS; i++) {
+ if (IS_VF(edev) && qede_stats_arr[i].pf_only)
+ continue;
strcpy(buf + j * ETH_GSTRING_LEN,
qede_stats_arr[i].string);
j++;
@@ -139,6 +172,14 @@ static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
case ETH_SS_STATS:
qede_get_strings_stats(edev, buf);
break;
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(buf, qede_private_arr,
+ ETH_GSTRING_LEN * QEDE_PRI_FLAG_LEN);
+ break;
+ case ETH_SS_TEST:
+ memcpy(buf, qede_tests_str_arr,
+ ETH_GSTRING_LEN * QEDE_ETHTOOL_TEST_MAX);
+ break;
default:
DP_VERBOSE(edev, QED_MSG_DEBUG,
"Unsupported stringset 0x%08x\n", stringset);
@@ -156,8 +197,11 @@ static void qede_get_ethtool_stats(struct net_device *dev,
mutex_lock(&edev->qede_lock);
- for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++)
+ for (sidx = 0; sidx < QEDE_NUM_STATS; sidx++) {
+ if (IS_VF(edev) && qede_stats_arr[sidx].pf_only)
+ continue;
buf[cnt++] = QEDE_STATS_DATA(edev, sidx);
+ }
for (sidx = 0; sidx < QEDE_NUM_RQSTATS; sidx++) {
buf[cnt] = 0;
@@ -176,8 +220,21 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
switch (stringset) {
case ETH_SS_STATS:
- return num_stats + QEDE_NUM_RQSTATS;
+ if (IS_VF(edev)) {
+ int i;
+ for (i = 0; i < QEDE_NUM_STATS; i++)
+ if (qede_stats_arr[i].pf_only)
+ num_stats--;
+ }
+ return num_stats + QEDE_NUM_RQSTATS;
+ case ETH_SS_PRIV_FLAGS:
+ return QEDE_PRI_FLAG_LEN;
+ case ETH_SS_TEST:
+ if (!IS_VF(edev))
+ return QEDE_ETHTOOL_TEST_MAX;
+ else
+ return 0;
default:
DP_VERBOSE(edev, QED_MSG_DEBUG,
"Unsupported stringset 0x%08x\n", stringset);
@@ -185,6 +242,13 @@ static int qede_get_sset_count(struct net_device *dev, int stringset)
}
}
+static u32 qede_get_priv_flags(struct net_device *dev)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
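+	/* More than one HW function behind this PCI function means the
+	 * device runs in Coupled-Function (CMT) mode.
+	 */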
+	return (edev->dev_info.common.num_hwfns > 1) << QEDE_PRI_FLAG_CMT;
+}
+
static int qede_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct qede_dev *edev = netdev_priv(dev);
@@ -217,9 +281,9 @@ static int qede_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
struct qed_link_params params;
u32 speed;
- if (!edev->dev_info.common.is_mf_default) {
+ if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
DP_INFO(edev,
- "Link parameters can not be changed in non-default mode\n");
+ "Link settings are not allowed to be changed\n");
return -EOPNOTSUPP;
}
@@ -328,6 +392,12 @@ static int qede_nway_reset(struct net_device *dev)
struct qed_link_output current_link;
struct qed_link_params link_params;
+ if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
+ DP_INFO(edev,
+ "Link settings are not allowed to be changed\n");
+ return -EOPNOTSUPP;
+ }
+
if (!netif_running(dev))
return 0;
@@ -357,6 +427,59 @@ static u32 qede_get_link(struct net_device *dev)
return current_link.link_up;
}
+static int qede_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u16 rxc, txc;
+
+ memset(coal, 0, sizeof(struct ethtool_coalesce));
+ edev->ops->common->get_coalesce(edev->cdev, &rxc, &txc);
+
+ coal->rx_coalesce_usecs = rxc;
+ coal->tx_coalesce_usecs = txc;
+
+ return 0;
+}
+
+static int qede_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ int i, rc = 0;
+ u16 rxc, txc;
+ u8 sb_id;
+
+ if (!netif_running(dev)) {
+ DP_INFO(edev, "Interface is down\n");
+ return -EINVAL;
+ }
+
+ if (coal->rx_coalesce_usecs > QED_COALESCE_MAX ||
+ coal->tx_coalesce_usecs > QED_COALESCE_MAX) {
+ DP_INFO(edev,
+ "Can't support requested %s coalesce value [max supported value %d]\n",
+ coal->rx_coalesce_usecs > QED_COALESCE_MAX ? "rx"
+ : "tx",
+ QED_COALESCE_MAX);
+ return -EINVAL;
+ }
+
+ rxc = (u16)coal->rx_coalesce_usecs;
+ txc = (u16)coal->tx_coalesce_usecs;
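+	/* Apply the new thresholds to the status block of each RSS queue */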
+ for_each_rss(i) {
+ sb_id = edev->fp_array[i].sb_info->igu_sb_id;
+ rc = edev->ops->common->set_coalesce(edev->cdev, rxc, txc,
+ (u8)i, sb_id);
+ if (rc) {
+ DP_INFO(edev, "Set coalesce error, rc = %d\n", rc);
+ return rc;
+ }
+ }
+
+ return rc;
+}
+
static void qede_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *ering)
{
@@ -428,9 +551,9 @@ static int qede_set_pauseparam(struct net_device *dev,
struct qed_link_params params;
struct qed_link_output current_link;
- if (!edev->dev_info.common.is_mf_default) {
+ if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
DP_INFO(edev,
- "Pause parameters can not be updated in non-default mode\n");
+ "Pause settings are not allowed to be changed\n");
return -EOPNOTSUPP;
}
@@ -569,6 +692,541 @@ static int qede_set_phys_id(struct net_device *dev,
return 0;
}
+static int qede_get_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
+{
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case UDP_V4_FLOW:
+ if (edev->rss_params.rss_caps & QED_RSS_IPV4_UDP)
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case UDP_V6_FLOW:
+ if (edev->rss_params.rss_caps & QED_RSS_IPV6_UDP)
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ break;
+ default:
+ info->data = 0;
+ break;
+ }
+
+ return 0;
+}
+
+static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+ u32 *rules __always_unused)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = edev->num_rss;
+ return 0;
+ case ETHTOOL_GRXFH:
+ return qede_get_rss_flags(edev, info);
+ default:
+ DP_ERR(edev, "Command parameters not supported\n");
+ return -EOPNOTSUPP;
+ }
+}
+
+static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
+{
+ struct qed_update_vport_params vport_update_params;
+ u8 set_caps = 0, clr_caps = 0;
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Set rss flags command parameters: flow type = %d, data = %llu\n",
+ info->flow_type, info->data);
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ /* For TCP only 4-tuple hash is supported */
+ if (info->data ^ (RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ DP_INFO(edev, "Command parameters not supported\n");
+ return -EINVAL;
+ }
+ return 0;
+ case UDP_V4_FLOW:
+ /* For UDP either 2-tuple hash or 4-tuple hash is supported */
+ if (info->data == (RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ set_caps = QED_RSS_IPV4_UDP;
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "UDP 4-tuple enabled\n");
+ } else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
+ clr_caps = QED_RSS_IPV4_UDP;
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "UDP 4-tuple disabled\n");
+ } else {
+ return -EINVAL;
+ }
+ break;
+ case UDP_V6_FLOW:
+ /* For UDP either 2-tuple hash or 4-tuple hash is supported */
+ if (info->data == (RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ set_caps = QED_RSS_IPV6_UDP;
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "UDP 4-tuple enabled\n");
+ } else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
+ clr_caps = QED_RSS_IPV6_UDP;
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "UDP 4-tuple disabled\n");
+ } else {
+ return -EINVAL;
+ }
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ /* For IP only 2-tuple hash is supported */
+ if (info->data ^ (RXH_IP_SRC | RXH_IP_DST)) {
+ DP_INFO(edev, "Command parameters not supported\n");
+ return -EINVAL;
+ }
+ return 0;
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IP_USER_FLOW:
+ case ETHER_FLOW:
+ /* RSS is not supported for these protocols */
+ if (info->data) {
+ DP_INFO(edev, "Command parameters not supported\n");
+ return -EINVAL;
+ }
+ return 0;
+ default:
+ return -EINVAL;
+ }
+
+ /* No action is needed if there is no change in the rss capability */
+ if (edev->rss_params.rss_caps == ((edev->rss_params.rss_caps &
+ ~clr_caps) | set_caps))
+ return 0;
+
+ /* Update internal configuration */
+ edev->rss_params.rss_caps = (edev->rss_params.rss_caps & ~clr_caps) |
+ set_caps;
+ edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
+
+ /* Re-configure if possible */
+ if (netif_running(edev->ndev)) {
+ memset(&vport_update_params, 0, sizeof(vport_update_params));
+ vport_update_params.update_rss_flg = 1;
+ vport_update_params.vport_id = 0;
+ memcpy(&vport_update_params.rss_params, &edev->rss_params,
+ sizeof(vport_update_params.rss_params));
+ return edev->ops->vport_update(edev->cdev,
+ &vport_update_params);
+ }
+
+ return 0;
+}
+
+static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ switch (info->cmd) {
+ case ETHTOOL_SRXFH:
+ return qede_set_rss_flags(edev, info);
+ default:
+ DP_INFO(edev, "Command parameters not supported\n");
+ return -EOPNOTSUPP;
+ }
+}
+
+static u32 qede_get_rxfh_indir_size(struct net_device *dev)
+{
+ return QED_RSS_IND_TABLE_SIZE;
+}
+
+static u32 qede_get_rxfh_key_size(struct net_device *dev)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ return sizeof(edev->rss_params.rss_key);
+}
+
+static int qede_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, u8 *hfunc)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ int i;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ if (!indir)
+ return 0;
+
+ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
+ indir[i] = edev->rss_params.rss_ind_table[i];
+
+ if (key)
+ memcpy(key, edev->rss_params.rss_key,
+ qede_get_rxfh_key_size(dev));
+
+ return 0;
+}
+
+static int qede_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct qed_update_vport_params vport_update_params;
+ struct qede_dev *edev = netdev_priv(dev);
+ int i;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (!indir && !key)
+ return 0;
+
+ if (indir) {
+ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
+ edev->rss_params.rss_ind_table[i] = indir[i];
+ edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
+ }
+
+ if (key) {
+ memcpy(&edev->rss_params.rss_key, key,
+ qede_get_rxfh_key_size(dev));
+ edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
+ }
+
+ if (netif_running(edev->ndev)) {
+ memset(&vport_update_params, 0, sizeof(vport_update_params));
+ vport_update_params.update_rss_flg = 1;
+ vport_update_params.vport_id = 0;
+ memcpy(&vport_update_params.rss_params, &edev->rss_params,
+ sizeof(vport_update_params.rss_params));
+ return edev->ops->vport_update(edev->cdev,
+ &vport_update_params);
+ }
+
+ return 0;
+}
+
+/* This function enables the interrupt generation and the NAPI on the device */
+static void qede_netif_start(struct qede_dev *edev)
+{
+ int i;
+
+ if (!netif_running(edev->ndev))
+ return;
+
+ for_each_rss(i) {
+ /* Update and reenable interrupts */
+ qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_ENABLE, 1);
+ napi_enable(&edev->fp_array[i].napi);
+ }
+}
+
+/* This function disables the NAPI and the interrupt generation on the device */
+static void qede_netif_stop(struct qede_dev *edev)
+{
+ int i;
+
+ for_each_rss(i) {
+ napi_disable(&edev->fp_array[i].napi);
+ /* Disable interrupts */
+ qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_DISABLE, 0);
+ }
+}
+
+static int qede_selftest_transmit_traffic(struct qede_dev *edev,
+ struct sk_buff *skb)
+{
+ struct qede_tx_queue *txq = &edev->fp_array[0].txqs[0];
+ struct eth_tx_1st_bd *first_bd;
+ dma_addr_t mapping;
+ int i, idx, val;
+
+ /* Fill the entry in the SW ring and the BDs in the FW ring */
+ idx = txq->sw_tx_prod & NUM_TX_BDS_MAX;
+ txq->sw_tx_ring[idx].skb = skb;
+ first_bd = qed_chain_produce(&txq->tx_pbl);
+ memset(first_bd, 0, sizeof(*first_bd));
+ val = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
+ first_bd->data.bd_flags.bitfields = val;
+ val = skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK;
+ first_bd->data.bitfields |= (val << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
+
+ /* Map skb linear data for DMA and set in the first BD */
+ mapping = dma_map_single(&edev->pdev->dev, skb->data,
+ skb_headlen(skb), DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
+ DP_NOTICE(edev, "SKB mapping failed\n");
+ return -ENOMEM;
+ }
+ BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
+
+ /* update the first BD with the actual num BDs */
+ first_bd->data.nbds = 1;
+ txq->sw_tx_prod++;
+ /* 'next page' entries are counted in the producer value */
+ val = cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
+ txq->tx_db.data.bd_prod = val;
+
+	/* wmb makes sure that the BD data is updated before the producer is
+	 * updated; otherwise the FW may read stale data from the BDs.
+	 */
+ wmb();
+ barrier();
+ writel(txq->tx_db.raw, txq->doorbell_addr);
+
+	/* mmiowb is needed to synchronize doorbell writes from more than one
+	 * processor. It guarantees that the write reaches the device before
+	 * the queue lock is released and another start_xmit is called
+	 * (possibly on another CPU). Without this barrier, the next doorbell
+	 * can bypass this one. This is applicable to IA64/Altix systems.
+	 */
+ mmiowb();
+
+ for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
+ if (qede_txq_has_work(txq))
+ break;
+ usleep_range(100, 200);
+ }
+
+ if (!qede_txq_has_work(txq)) {
+ DP_NOTICE(edev, "Tx completion didn't happen\n");
+ return -1;
+ }
+
+ first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
+ dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
+ BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
+ txq->sw_tx_cons++;
+ txq->sw_tx_ring[idx].skb = NULL;
+
+ return 0;
+}
+
+static int qede_selftest_receive_traffic(struct qede_dev *edev)
+{
+ struct qede_rx_queue *rxq = edev->fp_array[0].rxq;
+ u16 hw_comp_cons, sw_comp_cons, sw_rx_index, len;
+ struct eth_fast_path_rx_reg_cqe *fp_cqe;
+ struct sw_rx_data *sw_rx_data;
+ union eth_rx_cqe *cqe;
+ u8 *data_ptr;
+ int i;
+
+	/* The packet is expected to be received on rx-queue 0 even though
+	 * RSS is enabled. This is because queue 0 is configured as the
+	 * default queue, and the loopback traffic is not IP.
+	 */
+ for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
+ if (qede_has_rx_work(rxq))
+ break;
+ usleep_range(100, 200);
+ }
+
+ if (!qede_has_rx_work(rxq)) {
+ DP_NOTICE(edev, "Failed to receive the traffic\n");
+ return -1;
+ }
+
+ hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
+ sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
+
+	/* Memory barrier to prevent the CPU from speculatively reading the
+	 * CQE/BD before reading hw_comp_cons. Without it, if the FW writes
+	 * the CQE and SB only after the speculative read, the CPU would
+	 * process a stale CQE even though hw_comp_cons indicates new work.
+	 */
+ rmb();
+
+ /* Get the CQE from the completion ring */
+ cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
+
+ /* Get the data from the SW ring */
+ sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
+ sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
+ fp_cqe = &cqe->fast_path_regular;
+ len = le16_to_cpu(fp_cqe->len_on_first_bd);
+ data_ptr = (u8 *)(page_address(sw_rx_data->data) +
+ fp_cqe->placement_offset + sw_rx_data->page_offset);
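+	/* Verify the payload against the rolling byte pattern written by the
+	 * transmit side.
+	 */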
+ for (i = ETH_HLEN; i < len; i++)
+ if (data_ptr[i] != (unsigned char)(i & 0xff)) {
+ DP_NOTICE(edev, "Loopback test failed\n");
+ qede_recycle_rx_bd_ring(rxq, edev, 1);
+ return -1;
+ }
+
+ qede_recycle_rx_bd_ring(rxq, edev, 1);
+
+ return 0;
+}
+
+static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode)
+{
+ struct qed_link_params link_params;
+ struct sk_buff *skb = NULL;
+ int rc = 0, i;
+ u32 pkt_size;
+ u8 *packet;
+
+ if (!netif_running(edev->ndev)) {
+ DP_NOTICE(edev, "Interface is down\n");
+ return -EINVAL;
+ }
+
+ qede_netif_stop(edev);
+
+ /* Bring up the link in Loopback mode */
+ memset(&link_params, 0, sizeof(link_params));
+ link_params.link_up = true;
+ link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE;
+ link_params.loopback_mode = loopback_mode;
+ edev->ops->common->set_link(edev->cdev, &link_params);
+
+ /* Wait for loopback configuration to apply */
+ msleep_interruptible(500);
+
+ /* prepare the loopback packet */
+ pkt_size = edev->ndev->mtu + ETH_HLEN;
+
+ skb = netdev_alloc_skb(edev->ndev, pkt_size);
+ if (!skb) {
+ DP_INFO(edev, "Can't allocate skb\n");
+ rc = -ENOMEM;
+ goto test_loopback_exit;
+ }
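+	/* Build the loopback frame: DMAC and SMAC are both our own address,
+	 * followed by a rolling byte pattern that is verified on receive.
+	 */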
+ packet = skb_put(skb, pkt_size);
+ ether_addr_copy(packet, edev->ndev->dev_addr);
+ ether_addr_copy(packet + ETH_ALEN, edev->ndev->dev_addr);
+ memset(packet + (2 * ETH_ALEN), 0x77, (ETH_HLEN - (2 * ETH_ALEN)));
+ for (i = ETH_HLEN; i < pkt_size; i++)
+ packet[i] = (unsigned char)(i & 0xff);
+
+ rc = qede_selftest_transmit_traffic(edev, skb);
+ if (rc)
+ goto test_loopback_exit;
+
+ rc = qede_selftest_receive_traffic(edev);
+ if (rc)
+ goto test_loopback_exit;
+
+ DP_VERBOSE(edev, NETIF_MSG_RX_STATUS, "Loopback test successful\n");
+
+test_loopback_exit:
+ dev_kfree_skb(skb);
+
+ /* Bring up the link in Normal mode */
+ memset(&link_params, 0, sizeof(link_params));
+ link_params.link_up = true;
+ link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE;
+ link_params.loopback_mode = QED_LINK_LOOPBACK_NONE;
+ edev->ops->common->set_link(edev->cdev, &link_params);
+
+ /* Wait for loopback configuration to apply */
+ msleep_interruptible(500);
+
+ qede_netif_start(edev);
+
+ return rc;
+}
+
+static void qede_self_test(struct net_device *dev,
+ struct ethtool_test *etest, u64 *buf)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+ "Self-test command parameters: offline = %d, external_lb = %d\n",
+ (etest->flags & ETH_TEST_FL_OFFLINE),
+ (etest->flags & ETH_TEST_FL_EXTERNAL_LB) >> 2);
+
+ memset(buf, 0, sizeof(u64) * QEDE_ETHTOOL_TEST_MAX);
+
+ if (etest->flags & ETH_TEST_FL_OFFLINE) {
+ if (qede_selftest_run_loopback(edev,
+ QED_LINK_LOOPBACK_INT_PHY)) {
+ buf[QEDE_ETHTOOL_INT_LOOPBACK] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+ }
+
+ if (edev->ops->common->selftest->selftest_interrupt(edev->cdev)) {
+ buf[QEDE_ETHTOOL_INTERRUPT_TEST] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+
+ if (edev->ops->common->selftest->selftest_memory(edev->cdev)) {
+ buf[QEDE_ETHTOOL_MEMORY_TEST] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+
+ if (edev->ops->common->selftest->selftest_register(edev->cdev)) {
+ buf[QEDE_ETHTOOL_REGISTER_TEST] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+
+ if (edev->ops->common->selftest->selftest_clock(edev->cdev)) {
+ buf[QEDE_ETHTOOL_CLOCK_TEST] = 1;
+ etest->flags |= ETH_TEST_FL_FAILED;
+ }
+}
+
+static int qede_set_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna,
+ const void *data)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u32 val;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ val = *(u32 *)data;
+ if (val < QEDE_MIN_PKT_LEN || val > QEDE_RX_HDR_SIZE) {
+ DP_VERBOSE(edev, QED_MSG_DEBUG,
+				   "Invalid rx copy break value, range is [%u, %u]\n",
+ QEDE_MIN_PKT_LEN, QEDE_RX_HDR_SIZE);
+ return -EINVAL;
+ }
+
+ edev->rx_copybreak = *(u32 *)data;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int qede_get_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna, void *data)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)data = edev->rx_copybreak;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
static const struct ethtool_ops qede_ethtool_ops = {
.get_settings = qede_get_settings,
.set_settings = qede_set_settings,
@@ -577,6 +1235,8 @@ static const struct ethtool_ops qede_ethtool_ops = {
.set_msglevel = qede_set_msglevel,
.nway_reset = qede_nway_reset,
.get_link = qede_get_link,
+ .get_coalesce = qede_get_coalesce,
+ .set_coalesce = qede_set_coalesce,
.get_ringparam = qede_get_ringparam,
.set_ringparam = qede_set_ringparam,
.get_pauseparam = qede_get_pauseparam,
@@ -584,13 +1244,51 @@ static const struct ethtool_ops qede_ethtool_ops = {
.get_strings = qede_get_strings,
.set_phys_id = qede_set_phys_id,
.get_ethtool_stats = qede_get_ethtool_stats,
+ .get_priv_flags = qede_get_priv_flags,
.get_sset_count = qede_get_sset_count,
+ .get_rxnfc = qede_get_rxnfc,
+ .set_rxnfc = qede_set_rxnfc,
+ .get_rxfh_indir_size = qede_get_rxfh_indir_size,
+ .get_rxfh_key_size = qede_get_rxfh_key_size,
+ .get_rxfh = qede_get_rxfh,
+ .set_rxfh = qede_set_rxfh,
+ .get_channels = qede_get_channels,
+ .set_channels = qede_set_channels,
+ .self_test = qede_self_test,
+ .get_tunable = qede_get_tunable,
+ .set_tunable = qede_set_tunable,
+};
+
+static const struct ethtool_ops qede_vf_ethtool_ops = {
+ .get_settings = qede_get_settings,
+ .get_drvinfo = qede_get_drvinfo,
+ .get_msglevel = qede_get_msglevel,
+ .set_msglevel = qede_set_msglevel,
+ .get_link = qede_get_link,
+ .get_ringparam = qede_get_ringparam,
+ .set_ringparam = qede_set_ringparam,
+ .get_strings = qede_get_strings,
+ .get_ethtool_stats = qede_get_ethtool_stats,
+ .get_priv_flags = qede_get_priv_flags,
+ .get_sset_count = qede_get_sset_count,
+ .get_rxnfc = qede_get_rxnfc,
+ .set_rxnfc = qede_set_rxnfc,
+ .get_rxfh_indir_size = qede_get_rxfh_indir_size,
+ .get_rxfh_key_size = qede_get_rxfh_key_size,
+ .get_rxfh = qede_get_rxfh,
+ .set_rxfh = qede_set_rxfh,
.get_channels = qede_get_channels,
.set_channels = qede_set_channels,
+ .get_tunable = qede_get_tunable,
+ .set_tunable = qede_set_tunable,
};
void qede_set_ethtool_ops(struct net_device *dev)
{
- dev->ethtool_ops = &qede_ethtool_ops;
+ struct qede_dev *edev = netdev_priv(dev);
+
+ if (IS_VF(edev))
+ dev->ethtool_ops = &qede_vf_ethtool_ops;
+ else
+ dev->ethtool_ops = &qede_ethtool_ops;
}
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 12f6615797de..9544e4c41359 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -24,7 +24,7 @@
#include <linux/netdev_features.h>
#include <linux/udp.h>
#include <linux/tcp.h>
-#include <net/vxlan.h>
+#include <net/udp_tunnel.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
@@ -58,6 +58,7 @@ static const struct qed_eth_ops *qed_ops;
#define CHIP_NUM_57980S_100 0x1644
#define CHIP_NUM_57980S_50 0x1654
#define CHIP_NUM_57980S_25 0x1656
+#define CHIP_NUM_57980S_IOV 0x1664
#ifndef PCI_DEVICE_ID_NX2_57980E
#define PCI_DEVICE_ID_57980S_40 CHIP_NUM_57980S_40
@@ -66,15 +67,24 @@ static const struct qed_eth_ops *qed_ops;
#define PCI_DEVICE_ID_57980S_100 CHIP_NUM_57980S_100
#define PCI_DEVICE_ID_57980S_50 CHIP_NUM_57980S_50
#define PCI_DEVICE_ID_57980S_25 CHIP_NUM_57980S_25
+#define PCI_DEVICE_ID_57980S_IOV CHIP_NUM_57980S_IOV
#endif
+enum qede_pci_private {
+ QEDE_PRIVATE_PF,
+ QEDE_PRIVATE_VF
+};
+
static const struct pci_device_id qede_pci_tbl[] = {
- { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), 0 },
- { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), 0 },
- { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), 0 },
- { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), 0 },
- { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), 0 },
- { PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), 0 },
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_40), QEDE_PRIVATE_PF},
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_10), QEDE_PRIVATE_PF},
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_MF), QEDE_PRIVATE_PF},
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_100), QEDE_PRIVATE_PF},
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_50), QEDE_PRIVATE_PF},
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_25), QEDE_PRIVATE_PF},
+#ifdef CONFIG_QED_SRIOV
+ {PCI_VDEVICE(QLOGIC, PCI_DEVICE_ID_57980S_IOV), QEDE_PRIVATE_VF},
+#endif
{ 0 }
};
@@ -89,17 +99,87 @@ static int qede_alloc_rx_buffer(struct qede_dev *edev,
struct qede_rx_queue *rxq);
static void qede_link_update(void *dev, struct qed_link_output *link);
+#ifdef CONFIG_QED_SRIOV
+static int qede_set_vf_vlan(struct net_device *ndev, int vf, u16 vlan, u8 qos)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ if (vlan > 4095) {
+ DP_NOTICE(edev, "Illegal vlan value %d\n", vlan);
+ return -EINVAL;
+ }
+
+ DP_VERBOSE(edev, QED_MSG_IOV, "Setting Vlan 0x%04x to VF [%d]\n",
+ vlan, vf);
+
+ return edev->ops->iov->set_vlan(edev->cdev, vlan, vf);
+}
+
+static int qede_set_vf_mac(struct net_device *ndev, int vfidx, u8 *mac)
+{
+ struct qede_dev *edev = netdev_priv(ndev);
+
+ DP_VERBOSE(edev, QED_MSG_IOV,
+ "Setting MAC %02x:%02x:%02x:%02x:%02x:%02x to VF [%d]\n",
+ mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], vfidx);
+
+ if (!is_valid_ether_addr(mac)) {
+ DP_VERBOSE(edev, QED_MSG_IOV, "MAC address isn't valid\n");
+ return -EINVAL;
+ }
+
+ return edev->ops->iov->set_mac(edev->cdev, mac, vfidx);
+}
+
+static int qede_sriov_configure(struct pci_dev *pdev, int num_vfs_param)
+{
+ struct qede_dev *edev = netdev_priv(pci_get_drvdata(pdev));
+ struct qed_dev_info *qed_info = &edev->dev_info.common;
+ int rc;
+
+ DP_VERBOSE(edev, QED_MSG_IOV, "Requested %d VFs\n", num_vfs_param);
+
+ rc = edev->ops->iov->configure(edev->cdev, num_vfs_param);
+
+ /* Enable/Disable Tx switching for PF */
+ if ((rc == num_vfs_param) && netif_running(edev->ndev) &&
+ qed_info->mf_mode != QED_MF_NPAR && qed_info->tx_switching) {
+ struct qed_update_vport_params params;
+
+ memset(&params, 0, sizeof(params));
+ params.vport_id = 0;
+ params.update_tx_switching_flg = 1;
+ params.tx_switching_flg = num_vfs_param ? 1 : 0;
+ edev->ops->vport_update(edev->cdev, &params);
+ }
+
+ return rc;
+}
+#endif
+
static struct pci_driver qede_pci_driver = {
.name = "qede",
.id_table = qede_pci_tbl,
.probe = qede_probe,
.remove = qede_remove,
+#ifdef CONFIG_QED_SRIOV
+ .sriov_configure = qede_sriov_configure,
+#endif
};
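+/* qed_eth_cb_ops callback: adopt the MAC address enforced by the qed core */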
+static void qede_force_mac(void *dev, u8 *mac)
+{
+ struct qede_dev *edev = dev;
+
+ ether_addr_copy(edev->ndev->dev_addr, mac);
+ ether_addr_copy(edev->primary_mac, mac);
+}
+
static struct qed_eth_cb_ops qede_ll_ops = {
{
.link_update = qede_link_update,
},
+ .force_mac = qede_force_mac,
};
static int qede_netdev_event(struct notifier_block *this, unsigned long event,
@@ -141,19 +221,10 @@ static
int __init qede_init(void)
{
int ret;
- u32 qed_ver;
pr_notice("qede_init: %s\n", version);
- qed_ver = qed_get_protocol_version(QED_PROTOCOL_ETH);
- if (qed_ver != QEDE_ETH_INTERFACE_VERSION) {
- pr_notice("Version mismatch [%08x != %08x]\n",
- qed_ver,
- QEDE_ETH_INTERFACE_VERSION);
- return -EINVAL;
- }
-
- qed_ops = qed_get_eth_ops(QEDE_ETH_INTERFACE_VERSION);
+ qed_ops = qed_get_eth_ops();
if (!qed_ops) {
pr_notice("Failed to get qed ethtool operations\n");
return -EINVAL;
@@ -319,6 +390,9 @@ static u32 qede_xmit_type(struct qede_dev *edev,
(ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
*ipv6_ext = 1;
+ if (skb->encapsulation)
+ rc |= XMIT_ENC;
+
if (skb_is_gso(skb))
rc |= XMIT_LSO;
@@ -380,6 +454,16 @@ static int map_frag_to_bd(struct qede_dev *edev,
return 0;
}
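+/* Bytes from the start of the frame up to and including the (inner) TCP
+ * header; on the LSO path these headers are placed in their own BD.
+ */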
+static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
+{
+ if (is_encap_pkt)
+ return (skb_inner_transport_header(skb) +
+ inner_tcp_hdrlen(skb) - skb->data);
+ else
+ return (skb_transport_header(skb) +
+ tcp_hdrlen(skb) - skb->data);
+}
+
/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
@@ -390,8 +474,7 @@ static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
if (xmit_type & XMIT_LSO) {
int hlen;
- hlen = skb_transport_header(skb) +
- tcp_hdrlen(skb) - skb->data;
+ hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);
/* linear payload would require its own BD */
if (skb_headlen(skb) > hlen)
@@ -402,6 +485,24 @@ static bool qede_pkt_req_lin(struct qede_dev *edev, struct sk_buff *skb,
}
#endif
+static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
+{
+	/* wmb makes sure that the BD data is updated before the producer is
+	 * updated; otherwise the FW may read stale data from the BDs.
+	 */
+ wmb();
+ barrier();
+ writel(txq->tx_db.raw, txq->doorbell_addr);
+
+	/* mmiowb is needed to synchronize doorbell writes from more than one
+	 * processor. It guarantees that the write reaches the device before
+	 * the queue lock is released and another start_xmit is called
+	 * (possibly on another CPU). Without this barrier, the next doorbell
+	 * can bypass this one. This is applicable to IA64/Altix systems.
+	 */
+ mmiowb();
+}
+
/* Main transmit function */
static
netdev_tx_t qede_start_xmit(struct sk_buff *skb,
@@ -460,6 +561,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
DP_NOTICE(edev, "SKB mapping failed\n");
qede_free_failed_tx_pkt(edev, txq, first_bd, 0, false);
+ qede_update_tx_producer(txq);
return NETDEV_TX_OK;
}
nbd++;
@@ -491,15 +593,18 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
/* Fill the parsing flags & params according to the requested offload */
if (xmit_type & XMIT_L4_CSUM) {
- u16 temp = 1 << ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT;
-
/* We don't re-calculate IP checksum as it is already done by
* the upper stack
*/
first_bd->data.bd_flags.bitfields |=
1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
- first_bd->data.bitfields |= cpu_to_le16(temp);
+ if (xmit_type & XMIT_ENC) {
+ first_bd->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+ first_bd->data.bitfields |=
+ 1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
+ }
/* If the packet is IPv6 with extension header, indicate that
* to FW and pass few params, since the device cracker doesn't
@@ -515,10 +620,15 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
third_bd->data.lso_mss =
cpu_to_le16(skb_shinfo(skb)->gso_size);
- first_bd->data.bd_flags.bitfields |=
- 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
- hlen = skb_transport_header(skb) +
- tcp_hdrlen(skb) - skb->data;
+ if (unlikely(xmit_type & XMIT_ENC)) {
+ first_bd->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
+ hlen = qede_get_skb_hlen(skb, true);
+ } else {
+ first_bd->data.bd_flags.bitfields |=
+ 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
+ hlen = qede_get_skb_hlen(skb, false);
+ }
/* @@@TBD - if will not be removed need to check */
third_bd->data.bitfields |=
@@ -551,6 +661,10 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
tx_data_bd = (struct eth_tx_bd *)third_bd;
data_split = true;
}
+ } else {
+ first_bd->data.bitfields |=
+ (skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
+ ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
}
/* Handle fragmented skb */
@@ -562,6 +676,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
if (rc) {
qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
data_split);
+ qede_update_tx_producer(txq);
return NETDEV_TX_OK;
}
@@ -586,6 +701,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
if (rc) {
qede_free_failed_tx_pkt(edev, txq, first_bd, nbd,
data_split);
+ qede_update_tx_producer(txq);
return NETDEV_TX_OK;
}
}
@@ -606,23 +722,14 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
txq->tx_db.data.bd_prod =
cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
- /* wmb makes sure that the BDs data is updated before updating the
- * producer, otherwise FW may read old data from the BDs.
- */
- wmb();
- barrier();
- writel(txq->tx_db.raw, txq->doorbell_addr);
-
- /* mmiowb is needed to synchronize doorbell writes from more than one
- * processor. It guarantees that the write arrives to the device before
- * the queue lock is released and another start_xmit is called (possibly
- * on another CPU). Without this barrier, the next doorbell can bypass
- * this doorbell. This is applicable to IA64/Altix systems.
- */
- mmiowb();
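+	/* With xmit_more the stack promises another frame right away, so the
+	 * doorbell can be deferred and batched; ring it now only for the last
+	 * frame or if the queue has been stopped.
+	 */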
+ if (!skb->xmit_more || netif_xmit_stopped(netdev_txq))
+ qede_update_tx_producer(txq);
if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
< (MAX_SKB_FRAGS + 1))) {
+ if (skb->xmit_more)
+ qede_update_tx_producer(txq);
+
netif_tx_stop_queue(netdev_txq);
DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
"Stop queue was called\n");
@@ -644,7 +751,7 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
}
-static int qede_txq_has_work(struct qede_tx_queue *txq)
+int qede_txq_has_work(struct qede_tx_queue *txq)
{
u16 hw_bd_cons;
@@ -727,7 +834,7 @@ static int qede_tx_int(struct qede_dev *edev,
return 0;
}
-static bool qede_has_rx_work(struct qede_rx_queue *rxq)
+bool qede_has_rx_work(struct qede_rx_queue *rxq)
{
u16 hw_comp_cons, sw_comp_cons;
@@ -782,8 +889,8 @@ static inline void qede_reuse_page(struct qede_dev *edev,
/* In case of allocation failures reuse buffers
* from consumer index to produce buffers for firmware
*/
-static void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
- struct qede_dev *edev, u8 count)
+void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
+ struct qede_dev *edev, u8 count)
{
struct sw_rx_data *curr_cons;
@@ -818,7 +925,7 @@ static inline int qede_realloc_rx_buffer(struct qede_dev *edev,
* network stack to take the ownership of the page
* which can be recycled multiple times by the driver.
*/
- atomic_inc(&curr_cons->data->_count);
+ page_ref_inc(curr_cons->data);
qede_reuse_page(edev, rxq, curr_cons);
}
@@ -879,6 +986,9 @@ static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
if (csum_flag & QEDE_CSUM_UNNECESSARY)
skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY)
+ skb->csum_level = 1;
}
static inline void qede_skb_receive(struct qede_dev *edev,
@@ -931,7 +1041,7 @@ static int qede_fill_frag_skb(struct qede_dev *edev,
/* Incr page ref count to reuse on allocation failure
* so that it doesn't get freed while freeing SKB.
*/
- atomic_inc(&current_bd->data->_count);
+ page_ref_inc(current_bd->data);
goto out;
}
@@ -971,8 +1081,7 @@ static void qede_tpa_start(struct qede_dev *edev,
	 * start until it's over, and we don't want to risk allocation failing
	 * here, so re-allocate once aggregation is over.
*/
- dma_unmap_addr_set(sw_rx_data_prod, mapping,
- dma_unmap_addr(replace_buf, mapping));
+ sw_rx_data_prod->mapping = replace_buf->mapping;
sw_rx_data_prod->data = replace_buf->data;
rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(mapping));
@@ -1188,13 +1297,47 @@ err:
tpa_info->skb = NULL;
}
-static u8 qede_check_csum(u16 flag)
+static bool qede_tunn_exist(u16 flag)
+{
+ return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT));
+}
+
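+/* Validate the outer (tunnel) and inner checksum indications from the CQE;
+ * an error on any checksum the HW actually calculated, or on either IP
+ * header, maps to QEDE_CSUM_ERROR.
+ */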
+static u8 qede_check_tunn_csum(u16 flag)
+{
+ u16 csum_flag = 0;
+ u8 tcsum = 0;
+
+ if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT))
+ csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;
+
+ if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
+ csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
+ tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
+ }
+
+ csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
+ PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
+ PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
+
+ if (csum_flag & flag)
+ return QEDE_CSUM_ERROR;
+
+ return QEDE_CSUM_UNNECESSARY | tcsum;
+}
+
+static u8 qede_check_notunn_csum(u16 flag)
{
u16 csum_flag = 0;
u8 csum = 0;
- if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
- PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
+ if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
+ PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
csum = QEDE_CSUM_UNNECESSARY;
@@ -1209,6 +1352,28 @@ static u8 qede_check_csum(u16 flag)
return csum;
}
+static u8 qede_check_csum(u16 flag)
+{
+ if (!qede_tunn_exist(flag))
+ return qede_check_notunn_csum(flag);
+ else
+ return qede_check_tunn_csum(flag);
+}
+
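+/* IP fragments carry no complete L4 header, so the HW cannot validate their
+ * L4 checksum; the RX path must not treat such CQEs as real csum errors.
+ */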
+static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
+ u16 flag)
+{
+ u8 tun_pars_flg = cqe->tunnel_pars_flags.flags;
+
+ if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK <<
+ ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) ||
+ (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
+ PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT)))
+ return true;
+
+ return false;
+}
+
static int qede_rx_int(struct qede_fastpath *fp, int budget)
{
struct qede_dev *edev = fp->edev;
@@ -1287,6 +1452,12 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
csum_flag = qede_check_csum(parse_flag);
if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
+ if (qede_pkt_is_ip_fragmented(&cqe->fast_path_regular,
+ parse_flag)) {
+ rxq->rx_ip_frags++;
+ goto alloc_skb;
+ }
+
DP_NOTICE(edev,
"CQE in CONS = %u has error, flags = %x, dropping incoming packet\n",
sw_comp_cons, parse_flag);
@@ -1295,6 +1466,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
goto next_cqe;
}
+alloc_skb:
skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
if (unlikely(!skb)) {
DP_NOTICE(edev,
@@ -1305,7 +1477,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
}
/* Copy data into SKB */
- if (len + pad <= QEDE_RX_HDR_SIZE) {
+ if (len + pad <= edev->rx_copybreak) {
memcpy(skb_put(skb, len),
page_address(data) + pad +
sw_rx_data->page_offset, len);
@@ -1340,7 +1512,7 @@ static int qede_rx_int(struct qede_fastpath *fp, int budget)
* freeing SKB.
*/
- atomic_inc(&sw_rx_data->data->_count);
+ page_ref_inc(sw_rx_data->data);
rxq->rx_alloc_errors++;
qede_recycle_rx_bd_ring(rxq, edev,
fp_cqe->bd_num);
@@ -1437,56 +1609,49 @@ next_cqe: /* don't consume bd rx buffer */
static int qede_poll(struct napi_struct *napi, int budget)
{
- int work_done = 0;
struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
- napi);
+ napi);
struct qede_dev *edev = fp->edev;
+ int rx_work_done = 0;
+ u8 tc;
- while (1) {
- u8 tc;
-
- for (tc = 0; tc < edev->num_tc; tc++)
- if (qede_txq_has_work(&fp->txqs[tc]))
- qede_tx_int(edev, &fp->txqs[tc]);
-
- if (qede_has_rx_work(fp->rxq)) {
- work_done += qede_rx_int(fp, budget - work_done);
-
- /* must not complete if we consumed full budget */
- if (work_done >= budget)
- break;
- }
+ for (tc = 0; tc < edev->num_tc; tc++)
+ if (qede_txq_has_work(&fp->txqs[tc]))
+ qede_tx_int(edev, &fp->txqs[tc]);
+
+ rx_work_done = qede_has_rx_work(fp->rxq) ?
+ qede_rx_int(fp, budget) : 0;
+ if (rx_work_done < budget) {
+ qed_sb_update_sb_idx(fp->sb_info);
+ /* *_has_*_work() reads the status block,
+ * thus we need to ensure that status block indices
+ * have been actually read (qed_sb_update_sb_idx)
+ * prior to this check (*_has_*_work) so that
+ * we won't write the "newer" value of the status block
+ * to HW (if there was a DMA right after
+ * qede_has_rx_work and if there is no rmb, the memory
+ * reading (qed_sb_update_sb_idx) may be postponed
+ * to right before *_ack_sb). In this case there
+ * will never be another interrupt until there is
+ * another update of the status block, while there
+ * is still unhandled work.
+ */
+ rmb();
/* Fall out from the NAPI loop if needed */
- if (!(qede_has_rx_work(fp->rxq) || qede_has_tx_work(fp))) {
- qed_sb_update_sb_idx(fp->sb_info);
- /* *_has_*_work() reads the status block,
- * thus we need to ensure that status block indices
- * have been actually read (qed_sb_update_sb_idx)
- * prior to this check (*_has_*_work) so that
- * we won't write the "newer" value of the status block
- * to HW (if there was a DMA right after
- * qede_has_rx_work and if there is no rmb, the memory
- * reading (qed_sb_update_sb_idx) may be postponed
- * to right before *_ack_sb). In this case there
- * will never be another interrupt until there is
- * another update of the status block, while there
- * is still unhandled work.
- */
- rmb();
-
- if (!(qede_has_rx_work(fp->rxq) ||
- qede_has_tx_work(fp))) {
- napi_complete(napi);
- /* Update and reenable interrupts */
- qed_sb_ack(fp->sb_info, IGU_INT_ENABLE,
- 1 /*update*/);
- break;
- }
+ if (!(qede_has_rx_work(fp->rxq) ||
+ qede_has_tx_work(fp))) {
+ napi_complete(napi);
+
+ /* Update and reenable interrupts */
+ qed_sb_ack(fp->sb_info, IGU_INT_ENABLE,
+ 1 /*update*/);
+ } else {
+ rx_work_done = budget;
}
}
- return work_done;
+ return rx_work_done;
}
static irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
@@ -1569,16 +1734,25 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
edev->stats.coalesced_bytes = stats.tpa_coalesced_bytes;
edev->stats.rx_64_byte_packets = stats.rx_64_byte_packets;
- edev->stats.rx_127_byte_packets = stats.rx_127_byte_packets;
- edev->stats.rx_255_byte_packets = stats.rx_255_byte_packets;
- edev->stats.rx_511_byte_packets = stats.rx_511_byte_packets;
- edev->stats.rx_1023_byte_packets = stats.rx_1023_byte_packets;
- edev->stats.rx_1518_byte_packets = stats.rx_1518_byte_packets;
- edev->stats.rx_1522_byte_packets = stats.rx_1522_byte_packets;
- edev->stats.rx_2047_byte_packets = stats.rx_2047_byte_packets;
- edev->stats.rx_4095_byte_packets = stats.rx_4095_byte_packets;
- edev->stats.rx_9216_byte_packets = stats.rx_9216_byte_packets;
- edev->stats.rx_16383_byte_packets = stats.rx_16383_byte_packets;
+ edev->stats.rx_65_to_127_byte_packets = stats.rx_65_to_127_byte_packets;
+ edev->stats.rx_128_to_255_byte_packets =
+ stats.rx_128_to_255_byte_packets;
+ edev->stats.rx_256_to_511_byte_packets =
+ stats.rx_256_to_511_byte_packets;
+ edev->stats.rx_512_to_1023_byte_packets =
+ stats.rx_512_to_1023_byte_packets;
+ edev->stats.rx_1024_to_1518_byte_packets =
+ stats.rx_1024_to_1518_byte_packets;
+ edev->stats.rx_1519_to_1522_byte_packets =
+ stats.rx_1519_to_1522_byte_packets;
+ edev->stats.rx_1519_to_2047_byte_packets =
+ stats.rx_1519_to_2047_byte_packets;
+ edev->stats.rx_2048_to_4095_byte_packets =
+ stats.rx_2048_to_4095_byte_packets;
+ edev->stats.rx_4096_to_9216_byte_packets =
+ stats.rx_4096_to_9216_byte_packets;
+ edev->stats.rx_9217_to_16383_byte_packets =
+ stats.rx_9217_to_16383_byte_packets;
edev->stats.rx_crc_errors = stats.rx_crc_errors;
edev->stats.rx_mac_crtl_frames = stats.rx_mac_crtl_frames;
edev->stats.rx_pause_frames = stats.rx_pause_frames;
@@ -1652,6 +1826,49 @@ static struct rtnl_link_stats64 *qede_get_stats64(
return stats;
}
+#ifdef CONFIG_QED_SRIOV
+static int qede_get_vf_config(struct net_device *dev, int vfidx,
+ struct ifla_vf_info *ivi)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ if (!edev->ops)
+ return -EINVAL;
+
+ return edev->ops->iov->get_config(edev->cdev, vfidx, ivi);
+}
+
+static int qede_set_vf_rate(struct net_device *dev, int vfidx,
+ int min_tx_rate, int max_tx_rate)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ return edev->ops->iov->set_rate(edev->cdev, vfidx, min_tx_rate,
+ max_tx_rate);
+}
+
+static int qede_set_vf_spoofchk(struct net_device *dev, int vfidx, bool val)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ if (!edev->ops)
+ return -EINVAL;
+
+ return edev->ops->iov->set_spoof(edev->cdev, vfidx, val);
+}
+
+static int qede_set_vf_link_state(struct net_device *dev, int vfidx,
+ int link_state)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+
+ if (!edev->ops)
+ return -EINVAL;
+
+ return edev->ops->iov->set_link_state(edev->cdev, vfidx, link_state);
+}
+#endif
+
static void qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
{
struct qed_update_vport_params params;
@@ -1850,10 +2067,13 @@ static int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
}
/* Remove vlan */
- rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL, vid);
- if (rc) {
- DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
- return -EINVAL;
+ if (vlan->configured) {
+ rc = qede_set_ucast_rx_vlan(edev, QED_FILTER_XCAST_TYPE_DEL,
+ vid);
+ if (rc) {
+ DP_ERR(edev, "Failed to remove VLAN %d\n", vid);
+ return -EINVAL;
+ }
}
qede_del_vlan_from_list(edev, vlan);
@@ -1893,6 +2113,99 @@ static void qede_vlan_mark_nonconfigured(struct qede_dev *edev)
edev->accept_any_vlan = false;
}
+int qede_set_features(struct net_device *dev, netdev_features_t features)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ netdev_features_t changes = features ^ dev->features;
+ bool need_reload = false;
+
+ /* No action needed if hardware GRO is disabled during driver load */
+ if (changes & NETIF_F_GRO) {
+ if (dev->features & NETIF_F_GRO)
+ need_reload = !edev->gro_disable;
+ else
+ need_reload = edev->gro_disable;
+ }
+
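+	/* A positive return tells the core not to update dev->features
+	 * itself; the driver has already applied them across the reload.
+	 */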
+ if (need_reload && netif_running(edev->ndev)) {
+ dev->features = features;
+ qede_reload(edev, NULL, NULL);
+ return 1;
+ }
+
+ return 0;
+}
+
+static void qede_udp_tunnel_add(struct net_device *dev,
+ struct udp_tunnel_info *ti)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u16 t_port = ntohs(ti->port);
+
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ if (edev->vxlan_dst_port)
+ return;
+
+ edev->vxlan_dst_port = t_port;
+
+		DP_VERBOSE(edev, QED_MSG_DEBUG, "Added vxlan port=%d\n",
+ t_port);
+
+ set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ if (edev->geneve_dst_port)
+ return;
+
+ edev->geneve_dst_port = t_port;
+
+		DP_VERBOSE(edev, QED_MSG_DEBUG, "Added geneve port=%d\n",
+ t_port);
+ set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
+ break;
+ default:
+ return;
+ }
+
+ schedule_delayed_work(&edev->sp_task, 0);
+}
+
+static void qede_udp_tunnel_del(struct net_device *dev,
+ struct udp_tunnel_info *ti)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ u16 t_port = ntohs(ti->port);
+
+ switch (ti->type) {
+ case UDP_TUNNEL_TYPE_VXLAN:
+ if (t_port != edev->vxlan_dst_port)
+ return;
+
+ edev->vxlan_dst_port = 0;
+
+		DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted vxlan port=%d\n",
+ t_port);
+
+ set_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags);
+ break;
+ case UDP_TUNNEL_TYPE_GENEVE:
+ if (t_port != edev->geneve_dst_port)
+ return;
+
+ edev->geneve_dst_port = 0;
+
+		DP_VERBOSE(edev, QED_MSG_DEBUG, "Deleted geneve port=%d\n",
+ t_port);
+ set_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags);
+ break;
+ default:
+ return;
+ }
+
+ schedule_delayed_work(&edev->sp_task, 0);
+}
+
static const struct net_device_ops qede_netdev_ops = {
.ndo_open = qede_open,
.ndo_stop = qede_close,
@@ -1901,9 +2214,22 @@ static const struct net_device_ops qede_netdev_ops = {
.ndo_set_mac_address = qede_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = qede_change_mtu,
+#ifdef CONFIG_QED_SRIOV
+ .ndo_set_vf_mac = qede_set_vf_mac,
+ .ndo_set_vf_vlan = qede_set_vf_vlan,
+#endif
.ndo_vlan_rx_add_vid = qede_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = qede_vlan_rx_kill_vid,
+ .ndo_set_features = qede_set_features,
.ndo_get_stats64 = qede_get_stats64,
+#ifdef CONFIG_QED_SRIOV
+ .ndo_set_vf_link_state = qede_set_vf_link_state,
+ .ndo_set_vf_spoofchk = qede_set_vf_spoofchk,
+ .ndo_get_vf_config = qede_get_vf_config,
+ .ndo_set_vf_rate = qede_set_vf_rate,
+#endif
+ .ndo_udp_tunnel_add = qede_udp_tunnel_add,
+ .ndo_udp_tunnel_del = qede_udp_tunnel_del,
};
/* -------------------------------------------------------------------------
@@ -1974,6 +2300,14 @@ static void qede_init_ndev(struct qede_dev *edev)
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
+ /* Encap features*/
+ hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_TSO_ECN;
+ ndev->hw_enc_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO_ECN |
+ NETIF_F_TSO6 | NETIF_F_GSO_GRE |
+ NETIF_F_GSO_UDP_TUNNEL | NETIF_F_RXCSUM;
+
ndev->vlan_features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
NETIF_F_HIGHDMA;
ndev->features = hw_features | NETIF_F_RXHASH | NETIF_F_RXCSUM |
@@ -2074,6 +2408,8 @@ static void qede_sp_task(struct work_struct *work)
{
struct qede_dev *edev = container_of(work, struct qede_dev,
sp_task.work);
+ struct qed_dev *cdev = edev->cdev;
+
mutex_lock(&edev->qede_lock);
if (edev->state == QEDE_STATE_OPEN) {
@@ -2081,6 +2417,24 @@ static void qede_sp_task(struct work_struct *work)
qede_config_rx_mode(edev->ndev);
}
+ if (test_and_clear_bit(QEDE_SP_VXLAN_PORT_CONFIG, &edev->sp_flags)) {
+ struct qed_tunn_params tunn_params;
+
+ memset(&tunn_params, 0, sizeof(tunn_params));
+ tunn_params.update_vxlan_port = 1;
+ tunn_params.vxlan_port = edev->vxlan_dst_port;
+ qed_ops->tunn_config(cdev, &tunn_params);
+ }
+
+ if (test_and_clear_bit(QEDE_SP_GENEVE_PORT_CONFIG, &edev->sp_flags)) {
+ struct qed_tunn_params tunn_params;
+
+ memset(&tunn_params, 0, sizeof(tunn_params));
+ tunn_params.update_geneve_port = 1;
+ tunn_params.geneve_port = edev->geneve_dst_port;
+ qed_ops->tunn_config(cdev, &tunn_params);
+ }
+
mutex_unlock(&edev->qede_lock);
}
@@ -2099,8 +2453,9 @@ enum qede_probe_mode {
};
static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
- enum qede_probe_mode mode)
+ bool is_vf, enum qede_probe_mode mode)
{
+ struct qed_probe_params probe_params;
struct qed_slowpath_params params;
struct qed_dev_eth_info dev_info;
struct qede_dev *edev;
@@ -2110,8 +2465,12 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
if (unlikely(dp_level & QED_LEVEL_INFO))
pr_notice("Starting qede probe\n");
- cdev = qed_ops->common->probe(pdev, QED_PROTOCOL_ETH,
- dp_module, dp_level);
+ memset(&probe_params, 0, sizeof(probe_params));
+ probe_params.protocol = QED_PROTOCOL_ETH;
+ probe_params.dp_module = dp_module;
+ probe_params.dp_level = dp_level;
+ probe_params.is_vf = is_vf;
+ cdev = qed_ops->common->probe(pdev, &probe_params);
if (!cdev) {
rc = -ENODEV;
goto err0;
@@ -2145,6 +2504,9 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
goto err2;
}
+ if (is_vf)
+ edev->flags |= QEDE_FLAG_IS_VF;
+
qede_init_ndev(edev);
rc = register_netdev(edev->ndev);
@@ -2157,8 +2519,14 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
edev->ops->register_ops(cdev, &qede_ll_ops, edev);
+#ifdef CONFIG_DCB
+ if (!IS_VF(edev))
+ qede_set_dcbnl_ops(edev->ndev);
+#endif
+
INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
mutex_init(&edev->qede_lock);
+ edev->rx_copybreak = QEDE_RX_HDR_SIZE;
DP_INFO(edev, "Ending successfully qede probe\n");
@@ -2176,12 +2544,24 @@ err0:
static int qede_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ bool is_vf = false;
u32 dp_module = 0;
u8 dp_level = 0;
+ switch ((enum qede_pci_private)id->driver_data) {
+ case QEDE_PRIVATE_VF:
+ if (debug & QED_LOG_VERBOSE_MASK)
+ dev_err(&pdev->dev, "Probing a VF\n");
+ is_vf = true;
+ break;
+ default:
+ if (debug & QED_LOG_VERBOSE_MASK)
+ dev_err(&pdev->dev, "Probing a PF\n");
+ }
+
qede_config_debug(debug, &dp_module, &dp_level);
- return __qede_probe(pdev, dp_module, dp_level,
+ return __qede_probe(pdev, dp_module, dp_level, is_vf,
QEDE_PROBE_NORMAL);
}
@@ -2320,7 +2700,7 @@ static void qede_free_sge_mem(struct qede_dev *edev,
if (replace_buf->data) {
dma_unmap_page(&edev->pdev->dev,
- dma_unmap_addr(replace_buf, mapping),
+ replace_buf->mapping,
PAGE_SIZE, DMA_FROM_DEVICE);
__free_page(replace_buf->data);
}
@@ -2420,7 +2800,7 @@ static int qede_alloc_sge_mem(struct qede_dev *edev,
goto err;
}
- dma_unmap_addr_set(replace_buf, mapping, mapping);
+ replace_buf->mapping = mapping;
tpa_info->replace_buf.page_offset = 0;
tpa_info->replace_buf_mapping = mapping;
@@ -2463,6 +2843,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
rc = edev->ops->common->chain_alloc(edev->cdev,
QED_CHAIN_USE_TO_CONSUME_PRODUCE,
QED_CHAIN_MODE_NEXT_PTR,
+ QED_CHAIN_CNT_TYPE_U16,
RX_RING_SIZE,
sizeof(struct eth_rx_bd),
&rxq->rx_bd_ring);
@@ -2474,6 +2855,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev,
rc = edev->ops->common->chain_alloc(edev->cdev,
QED_CHAIN_USE_TO_CONSUME,
QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U16,
RX_RING_SIZE,
sizeof(union eth_rx_cqe),
&rxq->rx_comp_ring);
@@ -2525,9 +2907,9 @@ static int qede_alloc_mem_txq(struct qede_dev *edev,
rc = edev->ops->common->chain_alloc(edev->cdev,
QED_CHAIN_USE_TO_CONSUME_PRODUCE,
QED_CHAIN_MODE_PBL,
+ QED_CHAIN_CNT_TYPE_U16,
NUM_TX_BDS_MAX,
- sizeof(*p_virt),
- &txq->tx_pbl);
+ sizeof(*p_virt), &txq->tx_pbl);
if (rc)
goto err;
@@ -2871,15 +3253,16 @@ static int qede_stop_queues(struct qede_dev *edev)
return rc;
}
-static int qede_start_queues(struct qede_dev *edev)
+static int qede_start_queues(struct qede_dev *edev, bool clear_stats)
{
int rc, tc, i;
int vlan_removal_en = 1;
struct qed_dev *cdev = edev->cdev;
- struct qed_update_vport_rss_params *rss_params = &edev->rss_params;
struct qed_update_vport_params vport_update_params;
struct qed_queue_start_common_params q_params;
+ struct qed_dev_info *qed_info = &edev->dev_info.common;
struct qed_start_vport_params start = {0};
+ bool reset_rss_indir = false;
if (!edev->num_rss) {
DP_ERR(edev,
@@ -2892,6 +3275,7 @@ static int qede_start_queues(struct qede_dev *edev)
start.vport_id = 0;
start.drop_ttl0 = true;
start.remove_inner_vlan = vlan_removal_en;
+ start.clear_stats = clear_stats;
rc = edev->ops->vport_start(cdev, &start);
@@ -2971,19 +3355,59 @@ static int qede_start_queues(struct qede_dev *edev)
vport_update_params.update_vport_active_flg = 1;
vport_update_params.vport_active_flg = 1;
+ if ((qed_info->mf_mode == QED_MF_NPAR || pci_num_vf(edev->pdev)) &&
+ qed_info->tx_switching) {
+ vport_update_params.update_tx_switching_flg = 1;
+ vport_update_params.tx_switching_flg = 1;
+ }
+
/* Fill struct with RSS params */
if (QEDE_RSS_CNT(edev) > 1) {
vport_update_params.update_rss_flg = 1;
- for (i = 0; i < 128; i++)
- rss_params->rss_ind_table[i] =
- ethtool_rxfh_indir_default(i, QEDE_RSS_CNT(edev));
- netdev_rss_key_fill(rss_params->rss_key,
- sizeof(rss_params->rss_key));
+
+ /* Need to validate current RSS config uses valid entries */
+ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+ if (edev->rss_params.rss_ind_table[i] >=
+ edev->num_rss) {
+ reset_rss_indir = true;
+ break;
+ }
+ }
+
+ if (!(edev->rss_params_inited & QEDE_RSS_INDIR_INITED) ||
+ reset_rss_indir) {
+ u16 val;
+
+ for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++) {
+ u16 indir_val;
+
+ val = QEDE_RSS_CNT(edev);
+ indir_val = ethtool_rxfh_indir_default(i, val);
+ edev->rss_params.rss_ind_table[i] = indir_val;
+ }
+ edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
+ }
+
+ if (!(edev->rss_params_inited & QEDE_RSS_KEY_INITED)) {
+ netdev_rss_key_fill(edev->rss_params.rss_key,
+ sizeof(edev->rss_params.rss_key));
+ edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
+ }
+
+ if (!(edev->rss_params_inited & QEDE_RSS_CAPS_INITED)) {
+ edev->rss_params.rss_caps = QED_RSS_IPV4 |
+ QED_RSS_IPV6 |
+ QED_RSS_IPV4_TCP |
+ QED_RSS_IPV6_TCP;
+ edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;
+ }
+
+ memcpy(&vport_update_params.rss_params, &edev->rss_params,
+ sizeof(vport_update_params.rss_params));
} else {
- memset(rss_params, 0, sizeof(*rss_params));
+ memset(&vport_update_params.rss_params, 0,
+ sizeof(vport_update_params.rss_params));
}
- memcpy(&vport_update_params.rss_params, rss_params,
- sizeof(*rss_params));
rc = edev->ops->vport_update(cdev, &vport_update_params);
if (rc) {
@@ -3061,6 +3485,7 @@ out:
enum qede_load_mode {
QEDE_LOAD_NORMAL,
+ QEDE_LOAD_RELOAD,
};
static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
@@ -3099,7 +3524,7 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode)
goto err3;
DP_INFO(edev, "Setup IRQs succeeded\n");
- rc = qede_start_queues(edev);
+ rc = qede_start_queues(edev, mode != QEDE_LOAD_RELOAD);
if (rc)
goto err4;
DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n");
@@ -3154,7 +3579,7 @@ void qede_reload(struct qede_dev *edev,
if (func)
func(edev, args);
- qede_load(edev, QEDE_LOAD_NORMAL);
+ qede_load(edev, QEDE_LOAD_RELOAD);
mutex_lock(&edev->qede_lock);
qede_config_rx_mode(edev->ndev);
@@ -3165,12 +3590,20 @@ void qede_reload(struct qede_dev *edev,
static int qede_open(struct net_device *ndev)
{
struct qede_dev *edev = netdev_priv(ndev);
+ int rc;
netif_carrier_off(ndev);
edev->ops->common->set_power_state(edev->cdev, PCI_D0);
- return qede_load(edev, QEDE_LOAD_NORMAL);
+ rc = qede_load(edev, QEDE_LOAD_NORMAL);
+
+ if (rc)
+ return rc;
+
+ udp_tunnel_get_rx_info(ndev);
+
+ return 0;
}
static int qede_close(struct net_device *ndev)
@@ -3221,6 +3654,11 @@ static int qede_set_mac_addr(struct net_device *ndev, void *p)
return -EFAULT;
}
+ if (!edev->ops->check_mac(edev->cdev, addr->sa_data)) {
+ DP_NOTICE(edev, "qed prevents setting MAC\n");
+ return -EINVAL;
+ }
+
ether_addr_copy(ndev->dev_addr, addr->sa_data);
if (!netif_running(ndev)) {
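The qede hunks above make RSS state persistent: the indirection table survives reloads, is validated against the live queue count, and is only re-seeded when an entry points past the last queue or was never initialised by the user. A minimal sketch of the validation step, assuming the table size and queue count are passed in rather than read from qede's structures:

	#include <linux/ethtool.h>
	#include <linux/types.h>

	/* Return true if every indirection entry targets a live queue. */
	static bool rss_indir_valid(const u16 *ind_table, int table_size,
				    u16 num_queues)
	{
		int i;

		for (i = 0; i < table_size; i++)
			if (ind_table[i] >= num_queues)
				return false;
		return true;
	}

	/* On failure, re-seed with the default spread ethtool would use. */
	static void rss_indir_reset(u16 *ind_table, int table_size,
				    u16 num_queues)
	{
		int i;

		for (i = 0; i < table_size; i++)
			ind_table[i] = ethtool_rxfh_indir_default(i, num_queues);
	}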
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index caf6ddb7ea76..49bad00a0f8f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -37,8 +37,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 64
-#define QLCNIC_LINUX_VERSIONID "5.3.64"
+#define _QLCNIC_LINUX_SUBVERSION 65
+#define QLCNIC_LINUX_VERSIONID "5.3.65"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -1026,10 +1026,8 @@ struct qlcnic_ipaddr {
#define QLCNIC_HAS_PHYS_PORT_ID 0x40000
#define QLCNIC_TSS_RSS 0x80000
-#ifdef CONFIG_QLCNIC_VXLAN
#define QLCNIC_ADD_VXLAN_PORT 0x100000
#define QLCNIC_DEL_VXLAN_PORT 0x200000
-#endif
#define QLCNIC_VLAN_FILTERING 0x800000
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index f9640d5ce6ba..bdbcd2b088a0 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -2159,7 +2159,6 @@ int qlcnic_83xx_get_mac_address(struct qlcnic_adapter *adapter, u8 *mac,
struct qlcnic_cmd_args cmd;
u32 mac_low, mac_high;
- function = 0;
err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_MAC_ADDRESS);
if (err)
return err;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index bf892160dd5f..a496390b8632 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1020,7 +1020,6 @@ static int qlcnic_83xx_idc_check_state_validity(struct qlcnic_adapter *adapter,
return 0;
}
-#ifdef CONFIG_QLCNIC_VXLAN
#define QLC_83XX_ENCAP_TYPE_VXLAN BIT_1
#define QLC_83XX_MATCH_ENCAP_ID BIT_2
#define QLC_83XX_SET_VXLAN_UDP_DPORT BIT_3
@@ -1089,14 +1088,12 @@ static int qlcnic_set_vxlan_parsing(struct qlcnic_adapter *adapter,
return ret;
}
-#endif
static void qlcnic_83xx_periodic_tasks(struct qlcnic_adapter *adapter)
{
if (adapter->fhash.fnum)
qlcnic_prune_lb_filters(adapter);
-#ifdef CONFIG_QLCNIC_VXLAN
if (adapter->flags & QLCNIC_ADD_VXLAN_PORT) {
if (qlcnic_set_vxlan_port(adapter))
return;
@@ -1112,7 +1109,6 @@ static void qlcnic_83xx_periodic_tasks(struct qlcnic_adapter *adapter)
adapter->ahw->vxlan_port = 0;
adapter->flags &= ~QLCNIC_DEL_VXLAN_PORT;
}
-#endif
}
/**
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
index 9777e5713525..f4aa6331b367 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.h
@@ -45,7 +45,6 @@ struct qlcnic_dcb {
static inline void qlcnic_clear_dcb_ops(struct qlcnic_dcb *dcb)
{
kfree(dcb);
- dcb = NULL;
}
static inline int qlcnic_dcb_get_hw_capability(struct qlcnic_dcb *dcb)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 7bd6f25b4625..fedd7366713c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -102,7 +102,6 @@
#define QLCNIC_RESPONSE_DESC 0x05
#define QLCNIC_LRO_DESC 0x12
-#define QLCNIC_TX_POLL_BUDGET 128
#define QLCNIC_TCP_HDR_SIZE 20
#define QLCNIC_TCP_TS_OPTION_SIZE 12
#define QLCNIC_FETCH_RING_ID(handle) ((handle) >> 63)
@@ -772,6 +771,8 @@ netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
tx_ring->tx_stats.tx_bytes += skb->len;
tx_ring->tx_stats.xmit_called++;
+ /* Ensure writes are complete before HW fetches Tx descriptors */
+ wmb();
qlcnic_update_cmd_producer(tx_ring);
return NETDEV_TX_OK;
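The wmb() added above closes an ordering hole: descriptor writes can sit in the CPU's store buffers and be reordered after the producer-index update that tells the NIC to fetch them. A sketch of the pattern, with an illustrative doorbell register rather than qlcnic's actual layout:

	#include <asm/barrier.h>
	#include <linux/io.h>

	static void ring_tx_doorbell(void __iomem *doorbell, u32 producer)
	{
		/* Make every descriptor store visible before the doorbell
		 * write, or the NIC may DMA a half-written descriptor.
		 */
		wmb();
		writel(producer, doorbell);
	}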
@@ -2006,7 +2007,6 @@ static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_adapter *adapter;
- budget = QLCNIC_TX_POLL_BUDGET;
tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
adapter = tx_ring->adapter;
work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
@@ -2220,7 +2220,7 @@ void qlcnic_83xx_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
if (!opcode)
return;
- ring = QLCNIC_FETCH_RING_ID(qlcnic_83xx_hndl(sts_data[0]));
+ ring = QLCNIC_FETCH_RING_ID(sts_data[0]);
qlcnic_83xx_process_rcv_diag(adapter, ring, sts_data);
desc = &sds_ring->desc_head[consumer];
desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
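Dropping QLCNIC_TX_POLL_BUDGET above makes the Tx poller honour the budget NAPI actually granted instead of overriding it with a private constant of 128, which could let one ring take more than its share of the softirq. A sketch of the corrected shape; process_tx_ring() is a hypothetical completion helper standing in for qlcnic_process_cmd_ring():

	#include <linux/netdevice.h>

	static int process_tx_ring(struct napi_struct *napi, int budget);

	static int tx_poll_sketch(struct napi_struct *napi, int budget)
	{
		int work_done;

		/* Use NAPI's budget as-is; substituting a larger private
		 * budget defeats the fairness the core relies on.
		 */
		work_done = process_tx_ring(napi, budget);
		if (work_done < budget)
			napi_complete(napi);

		return work_done;
	}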
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 1205f6f9c941..3ebef27e0964 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -16,9 +16,7 @@
#include <linux/aer.h>
#include <linux/log2.h>
#include <linux/pci.h>
-#ifdef CONFIG_QLCNIC_VXLAN
#include <net/vxlan.h>
-#endif
#include "qlcnic.h"
#include "qlcnic_sriov.h"
@@ -474,13 +472,15 @@ static int qlcnic_get_phys_port_id(struct net_device *netdev,
return 0;
}
-#ifdef CONFIG_QLCNIC_VXLAN
static void qlcnic_add_vxlan_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
+ struct udp_tunnel_info *ti)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
+
/* Adapter supports only one VXLAN port. Use very first port
* for enabling offload
*/
@@ -488,23 +488,26 @@ static void qlcnic_add_vxlan_port(struct net_device *netdev,
return;
if (!ahw->vxlan_port_count) {
ahw->vxlan_port_count = 1;
- ahw->vxlan_port = ntohs(port);
+ ahw->vxlan_port = ntohs(ti->port);
adapter->flags |= QLCNIC_ADD_VXLAN_PORT;
return;
}
- if (ahw->vxlan_port == ntohs(port))
+ if (ahw->vxlan_port == ntohs(ti->port))
ahw->vxlan_port_count++;
}
static void qlcnic_del_vxlan_port(struct net_device *netdev,
- sa_family_t sa_family, __be16 port)
+ struct udp_tunnel_info *ti)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
+ return;
+
if (!qlcnic_encap_rx_offload(adapter) || !ahw->vxlan_port_count ||
- (ahw->vxlan_port != ntohs(port)))
+ (ahw->vxlan_port != ntohs(ti->port)))
return;
ahw->vxlan_port_count--;
@@ -519,7 +522,6 @@ static netdev_features_t qlcnic_features_check(struct sk_buff *skb,
features = vlan_features_check(skb, features);
return vxlan_features_check(skb, features);
}
-#endif
static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_open = qlcnic_open,
@@ -539,11 +541,9 @@ static const struct net_device_ops qlcnic_netdev_ops = {
.ndo_fdb_del = qlcnic_fdb_del,
.ndo_fdb_dump = qlcnic_fdb_dump,
.ndo_get_phys_port_id = qlcnic_get_phys_port_id,
-#ifdef CONFIG_QLCNIC_VXLAN
- .ndo_add_vxlan_port = qlcnic_add_vxlan_port,
- .ndo_del_vxlan_port = qlcnic_del_vxlan_port,
+ .ndo_udp_tunnel_add = qlcnic_add_vxlan_port,
+ .ndo_udp_tunnel_del = qlcnic_del_vxlan_port,
.ndo_features_check = qlcnic_features_check,
-#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = qlcnic_poll_controller,
#endif
@@ -2015,10 +2015,8 @@ qlcnic_attach(struct qlcnic_adapter *adapter)
qlcnic_create_sysfs_entries(adapter);
-#ifdef CONFIG_QLCNIC_VXLAN
if (qlcnic_encap_rx_offload(adapter))
- vxlan_get_rx_port(netdev);
-#endif
+ udp_tunnel_get_rx_info(netdev);
adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
return 0;
@@ -3952,8 +3950,14 @@ static pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *pdev,
static pci_ers_result_t qlcnic_82xx_io_slot_reset(struct pci_dev *pdev)
{
- return qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
- PCI_ERS_RESULT_RECOVERED;
+ pci_ers_result_t res;
+
+ rtnl_lock();
+ res = qlcnic_attach_func(pdev) ? PCI_ERS_RESULT_DISCONNECT :
+ PCI_ERS_RESULT_RECOVERED;
+ rtnl_unlock();
+
+ return res;
}
static void qlcnic_82xx_io_resume(struct pci_dev *pdev)
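The qlcnic_main.c conversion above moves the driver from the VXLAN-specific .ndo_add/del_vxlan_port hooks (and their CONFIG_QLCNIC_VXLAN guards) to the generic UDP tunnel API: handlers receive a struct udp_tunnel_info and must filter on the tunnel type themselves, and udp_tunnel_get_rx_info() replays all known ports on attach. A sketch of the receive side; program_vxlan_port() is hypothetical:

	#include <linux/netdevice.h>
	#include <net/udp_tunnel.h>

	static void program_vxlan_port(struct net_device *dev, u16 port);

	static void sketch_udp_tunnel_add(struct net_device *dev,
					  struct udp_tunnel_info *ti)
	{
		/* GENEVE and future tunnel types arrive here too now. */
		if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
			return;

		program_vxlan_port(dev, ntohs(ti->port));
	}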
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
index 017d8c2c8285..24061b9b92e8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -156,10 +156,8 @@ struct qlcnic_vf_info {
spinlock_t vlan_list_lock; /* Lock for VLAN list */
};
-struct qlcnic_async_work_list {
+struct qlcnic_async_cmd {
struct list_head list;
- struct work_struct work;
- void *ptr;
struct qlcnic_cmd_args *cmd;
};
@@ -168,7 +166,10 @@ struct qlcnic_back_channel {
struct workqueue_struct *bc_trans_wq;
struct workqueue_struct *bc_async_wq;
struct workqueue_struct *bc_flr_wq;
- struct list_head async_list;
+ struct qlcnic_adapter *adapter;
+ struct list_head async_cmd_list;
+ struct work_struct vf_async_work;
+ spinlock_t queue_lock; /* async_cmd_list queue lock */
};
struct qlcnic_sriov {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 7327b729ba2e..d7107055ec60 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -29,6 +29,7 @@
#define QLC_83XX_VF_RESET_FAIL_THRESH 8
#define QLC_BC_CMD_MAX_RETRY_CNT 5
+static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work);
static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *);
static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32);
static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *);
@@ -177,7 +178,10 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
}
bc->bc_async_wq = wq;
- INIT_LIST_HEAD(&bc->async_list);
+ INIT_LIST_HEAD(&bc->async_cmd_list);
+ INIT_WORK(&bc->vf_async_work, qlcnic_sriov_handle_async_issue_cmd);
+ spin_lock_init(&bc->queue_lock);
+ bc->adapter = adapter;
for (i = 0; i < num_vfs; i++) {
vf = &sriov->vf_info[i];
@@ -1517,17 +1521,21 @@ static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac,
void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
{
- struct list_head *head = &bc->async_list;
- struct qlcnic_async_work_list *entry;
+ struct list_head *head = &bc->async_cmd_list;
+ struct qlcnic_async_cmd *entry;
flush_workqueue(bc->bc_async_wq);
+ cancel_work_sync(&bc->vf_async_work);
+
+ spin_lock(&bc->queue_lock);
while (!list_empty(head)) {
- entry = list_entry(head->next, struct qlcnic_async_work_list,
+ entry = list_entry(head->next, struct qlcnic_async_cmd,
list);
- cancel_work_sync(&entry->work);
list_del(&entry->list);
+ kfree(entry->cmd);
kfree(entry);
}
+ spin_unlock(&bc->queue_lock);
}
void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
@@ -1587,57 +1595,64 @@ void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work)
{
- struct qlcnic_async_work_list *entry;
- struct qlcnic_adapter *adapter;
+ struct qlcnic_async_cmd *entry, *tmp;
+ struct qlcnic_back_channel *bc;
struct qlcnic_cmd_args *cmd;
+ struct list_head *head;
+ LIST_HEAD(del_list);
+
+ bc = container_of(work, struct qlcnic_back_channel, vf_async_work);
+ head = &bc->async_cmd_list;
+
+ spin_lock(&bc->queue_lock);
+ list_splice_init(head, &del_list);
+ spin_unlock(&bc->queue_lock);
+
+ list_for_each_entry_safe(entry, tmp, &del_list, list) {
+ list_del(&entry->list);
+ cmd = entry->cmd;
+ __qlcnic_sriov_issue_cmd(bc->adapter, cmd);
+ kfree(entry);
+ }
+
+ if (!list_empty(head))
+ queue_work(bc->bc_async_wq, &bc->vf_async_work);
- entry = container_of(work, struct qlcnic_async_work_list, work);
- adapter = entry->ptr;
- cmd = entry->cmd;
- __qlcnic_sriov_issue_cmd(adapter, cmd);
return;
}
-static struct qlcnic_async_work_list *
-qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc)
+static struct qlcnic_async_cmd *
+qlcnic_sriov_alloc_async_cmd(struct qlcnic_back_channel *bc,
+ struct qlcnic_cmd_args *cmd)
{
- struct list_head *node;
- struct qlcnic_async_work_list *entry = NULL;
- u8 empty = 0;
+ struct qlcnic_async_cmd *entry = NULL;
- list_for_each(node, &bc->async_list) {
- entry = list_entry(node, struct qlcnic_async_work_list, list);
- if (!work_pending(&entry->work)) {
- empty = 1;
- break;
- }
- }
+ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!entry)
+ return NULL;
- if (!empty) {
- entry = kzalloc(sizeof(struct qlcnic_async_work_list),
- GFP_ATOMIC);
- if (entry == NULL)
- return NULL;
- list_add_tail(&entry->list, &bc->async_list);
- }
+ entry->cmd = cmd;
+
+ spin_lock(&bc->queue_lock);
+ list_add_tail(&entry->list, &bc->async_cmd_list);
+ spin_unlock(&bc->queue_lock);
return entry;
}
static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc,
- work_func_t func, void *data,
struct qlcnic_cmd_args *cmd)
{
- struct qlcnic_async_work_list *entry = NULL;
+ struct qlcnic_async_cmd *entry = NULL;
- entry = qlcnic_sriov_get_free_node_async_work(bc);
- if (!entry)
+ entry = qlcnic_sriov_alloc_async_cmd(bc, cmd);
+ if (!entry) {
+ qlcnic_free_mbx_args(cmd);
+ kfree(cmd);
return;
+ }
- entry->ptr = data;
- entry->cmd = cmd;
- INIT_WORK(&entry->work, func);
- queue_work(bc->bc_async_wq, &entry->work);
+ queue_work(bc->bc_async_wq, &bc->vf_async_work);
}
static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter,
@@ -1649,8 +1664,8 @@ static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter,
if (adapter->need_fw_reset)
return -EIO;
- qlcnic_sriov_schedule_async_cmd(bc, qlcnic_sriov_handle_async_issue_cmd,
- adapter, cmd);
+ qlcnic_sriov_schedule_async_cmd(bc, cmd);
+
return 0;
}
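The sriov rework above replaces one work_struct per pending command with a single work item draining a spinlock-protected list; cleanup then only has to cancel one work and free whatever is still queued. A sketch of the drain step, with an illustrative entry type and a hypothetical issue_cmd():

	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct cmd_entry {
		struct list_head list;
		/* command payload elided */
	};

	static void issue_cmd(struct cmd_entry *entry);

	static void drain_cmds(struct list_head *queue, spinlock_t *lock)
	{
		struct cmd_entry *entry, *tmp;
		LIST_HEAD(del_list);

		/* Detach the whole queue in one shot, then work lock-free. */
		spin_lock(lock);
		list_splice_init(queue, &del_list);
		spin_unlock(lock);

		list_for_each_entry_safe(entry, tmp, &del_list, list) {
			list_del(&entry->list);
			issue_cmd(entry);
			kfree(entry);
		}
	}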
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index b28e73ea2c25..fd4a8e473f11 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -1892,7 +1892,6 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
skb->len += length;
skb->data_len += length;
skb->truesize += length;
- length -= length;
ql_update_mac_hdr_len(qdev, ib_mac_rsp,
lbq_desc->p.pg_chunk.va,
&hlen);
@@ -4687,7 +4686,7 @@ static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
/*
* Set up the operating parameters.
*/
- qdev->workqueue = create_singlethread_workqueue(ndev->name);
+ qdev->workqueue = alloc_ordered_workqueue(ndev->name, WQ_MEM_RECLAIM);
INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
@@ -4846,7 +4845,6 @@ static void ql_eeh_close(struct net_device *ndev)
}
/* Disabling the timer */
- del_timer_sync(&qdev->timer);
ql_cancel_all_work_sync(qdev);
for (i = 0; i < qdev->rss_ring_count; i++)
@@ -4873,6 +4871,7 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
return PCI_ERS_RESULT_CAN_RECOVER;
case pci_channel_io_frozen:
netif_device_detach(ndev);
+ del_timer_sync(&qdev->timer);
if (netif_running(ndev))
ql_eeh_close(ndev);
pci_disable_device(pdev);
@@ -4880,6 +4879,7 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
case pci_channel_io_perm_failure:
dev_err(&pdev->dev,
"%s: pci_channel_io_perm_failure.\n", __func__);
+ del_timer_sync(&qdev->timer);
ql_eeh_close(ndev);
set_bit(QL_EEH_FATAL, &qdev->flags);
return PCI_ERS_RESULT_DISCONNECT;
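The qlge change above swaps create_singlethread_workqueue() for alloc_ordered_workqueue(..., WQ_MEM_RECLAIM): still one work item at a time, but with a rescuer thread so queued work (the driver's reset paths) can make progress even under memory pressure. A sketch:

	#include <linux/workqueue.h>

	static struct workqueue_struct *make_reclaim_wq(const char *name)
	{
		/* Ordered: at most one work item runs at a time, like the
		 * old singlethreaded queue. WQ_MEM_RECLAIM: a rescuer
		 * thread guarantees forward progress during reclaim.
		 */
		return alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name);
	}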
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 1ef03939d25f..6e2add979471 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -719,7 +719,7 @@ qcaspi_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
qca->stats.ring_full++;
}
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
if (qca->spi_thread &&
qca->spi_thread->state != TASK_RUNNING)
@@ -734,7 +734,7 @@ qcaspi_netdev_tx_timeout(struct net_device *dev)
struct qcaspi *qca = netdev_priv(dev);
netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n",
- jiffies, jiffies - dev->trans_start);
+ jiffies, jiffies - dev_trans_start(dev));
qca->net_dev->stats.tx_errors++;
/* Trigger tx queue flush and QCA7000 reset */
qca->sync = QCASPI_SYNC_UNKNOWN;
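The qca_spi hunks above are part of a tree-wide cleanup: trans_start moved from struct net_device to the per-queue struct netdev_queue, so drivers go through helpers instead of the removed dev->trans_start field. A sketch of the two accessors in use:

	#include <linux/netdevice.h>

	static void note_tx_activity(struct net_device *dev)
	{
		netif_trans_update(dev);	/* txq 0's trans_start = jiffies */
	}

	static unsigned long last_tx_jiffies(struct net_device *dev)
	{
		return dev_trans_start(dev);	/* newest across all queues */
	}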
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 6b541e57c96a..cb29ee24cf1b 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -4,7 +4,7 @@
* Copyright (C) 2004 Sten Wang <sten.wang@rdc.com.tw>
* Copyright (C) 2007
* Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us>
- * Copyright (C) 2007-2012 Florian Fainelli <florian@openwrt.org>
+ * Copyright (C) 2007-2012 Florian Fainelli <f.fainelli@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -48,8 +48,8 @@
#include <asm/processor.h>
#define DRV_NAME "r6040"
-#define DRV_VERSION "0.28"
-#define DRV_RELDATE "07Oct2011"
+#define DRV_VERSION "0.29"
+#define DRV_RELDATE "04Jul2016"
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (6000 * HZ / 1000)
@@ -162,7 +162,7 @@
MODULE_AUTHOR("Sten Wang <sten.wang@rdc.com.tw>,"
"Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us>,"
- "Florian Fainelli <florian@openwrt.org>");
+ "Florian Fainelli <f.fainelli@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RDC R6040 NAPI PCI FastEthernet driver");
MODULE_VERSION(DRV_VERSION " " DRV_RELDATE);
@@ -200,7 +200,6 @@ struct r6040_private {
struct mii_bus *mii_bus;
struct napi_struct napi;
void __iomem *base;
- struct phy_device *phydev;
int old_link;
int old_duplex;
};
@@ -474,7 +473,7 @@ static void r6040_down(struct net_device *dev)
iowrite16(adrp[1], ioaddr + MID_0M);
iowrite16(adrp[2], ioaddr + MID_0H);
- phy_stop(lp->phydev);
+ phy_stop(dev->phydev);
}
static int r6040_close(struct net_device *dev)
@@ -515,12 +514,10 @@ static int r6040_close(struct net_device *dev)
static int r6040_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct r6040_private *lp = netdev_priv(dev);
-
- if (!lp->phydev)
+ if (!dev->phydev)
return -EINVAL;
- return phy_mii_ioctl(lp->phydev, rq, cmd);
+ return phy_mii_ioctl(dev->phydev, rq, cmd);
}
static int r6040_rx(struct net_device *dev, int limit)
@@ -617,10 +614,15 @@ static void r6040_tx(struct net_device *dev)
if (descptr->status & DSC_OWNER_MAC)
break; /* Not complete */
skb_ptr = descptr->skb_ptr;
+
+ /* Statistic Counter */
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb_ptr->len;
+
pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf),
skb_ptr->len, PCI_DMA_TODEVICE);
/* Free buffer */
- dev_kfree_skb_irq(skb_ptr);
+ dev_kfree_skb(skb_ptr);
descptr->skb_ptr = NULL;
/* To next descriptor */
descptr = descptr->vndescp;
@@ -641,12 +643,15 @@ static int r6040_poll(struct napi_struct *napi, int budget)
void __iomem *ioaddr = priv->base;
int work_done;
+ r6040_tx(dev);
+
work_done = r6040_rx(dev, budget);
if (work_done < budget) {
- napi_complete(napi);
- /* Enable RX interrupt */
- iowrite16(ioread16(ioaddr + MIER) | RX_INTS, ioaddr + MIER);
+ napi_complete_done(napi, work_done);
+ /* Enable RX/TX interrupt */
+ iowrite16(ioread16(ioaddr + MIER) | RX_INTS | TX_INTS,
+ ioaddr + MIER);
}
return work_done;
}
@@ -673,7 +678,7 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
}
/* RX interrupt request */
- if (status & RX_INTS) {
+ if (status & (RX_INTS | TX_INTS)) {
if (status & RX_NO_DESC) {
/* RX descriptor unavailable */
dev->stats.rx_dropped++;
@@ -684,15 +689,11 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
if (likely(napi_schedule_prep(&lp->napi))) {
/* Mask off RX interrupt */
- misr &= ~RX_INTS;
- __napi_schedule(&lp->napi);
+ misr &= ~(RX_INTS | TX_INTS);
+ __napi_schedule_irqoff(&lp->napi);
}
}
- /* TX interrupt request */
- if (status & TX_INTS)
- r6040_tx(dev);
-
/* Restore RDC MAC interrupt */
iowrite16(misr, ioaddr + MIER);
@@ -732,7 +733,7 @@ static int r6040_up(struct net_device *dev)
/* Initialize all MAC registers */
r6040_init_mac_regs(dev);
- phy_start(lp->phydev);
+ phy_start(dev->phydev);
return 0;
}
@@ -813,6 +814,9 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
void __iomem *ioaddr = lp->base;
unsigned long flags;
+ if (skb_put_padto(skb, ETH_ZLEN) < 0)
+ return NETDEV_TX_OK;
+
/* Critical Section */
spin_lock_irqsave(&lp->lock, flags);
@@ -824,17 +828,10 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
- /* Statistic Counter */
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += skb->len;
/* Set TX descriptor & Transmit it */
lp->tx_free_desc--;
descptr = lp->tx_insert_ptr;
- if (skb->len < ETH_ZLEN)
- descptr->len = ETH_ZLEN;
- else
- descptr->len = skb->len;
-
+ descptr->len = skb->len;
descptr->skb_ptr = skb;
descptr->buf = cpu_to_le32(pci_map_single(lp->pdev,
skb->data, skb->len, PCI_DMA_TODEVICE));
@@ -843,7 +840,8 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
skb_tx_timestamp(skb);
/* Trigger the MAC to check the TX descriptor */
- iowrite16(TM2TX, ioaddr + MTPR);
+ if (!skb->xmit_more || netif_queue_stopped(dev))
+ iowrite16(TM2TX, ioaddr + MTPR);
lp->tx_insert_ptr = descptr->vndescp;
/* If no tx resource, stop */
@@ -957,26 +955,12 @@ static void netdev_get_drvinfo(struct net_device *dev,
strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
}
-static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct r6040_private *rp = netdev_priv(dev);
-
- return phy_ethtool_gset(rp->phydev, cmd);
-}
-
-static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct r6040_private *rp = netdev_priv(dev);
-
- return phy_ethtool_sset(rp->phydev, cmd);
-}
-
static const struct ethtool_ops netdev_ethtool_ops = {
.get_drvinfo = netdev_get_drvinfo,
- .get_settings = netdev_get_settings,
- .set_settings = netdev_set_settings,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static const struct net_device_ops r6040_netdev_ops = {
@@ -998,7 +982,7 @@ static const struct net_device_ops r6040_netdev_ops = {
static void r6040_adjust_link(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
- struct phy_device *phydev = lp->phydev;
+ struct phy_device *phydev = dev->phydev;
int status_changed = 0;
void __iomem *ioaddr = lp->base;
@@ -1018,14 +1002,8 @@ static void r6040_adjust_link(struct net_device *dev)
lp->old_duplex = phydev->duplex;
}
- if (status_changed) {
- pr_info("%s: link %s", dev->name, phydev->link ?
- "UP" : "DOWN");
- if (phydev->link)
- pr_cont(" - %d/%s", phydev->speed,
- DUPLEX_FULL == phydev->duplex ? "full" : "half");
- pr_cont("\n");
- }
+ if (status_changed)
+ phy_print_status(phydev);
}
static int r6040_mii_probe(struct net_device *dev)
@@ -1057,7 +1035,6 @@ static int r6040_mii_probe(struct net_device *dev)
| SUPPORTED_TP);
phydev->advertising = phydev->supported;
- lp->phydev = phydev;
lp->old_link = 0;
lp->old_duplex = -1;
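The r6040 changes above drop the driver-private phydev pointer in favour of the one phylib already stores in dev->phydev, which lets the hand-rolled ethtool get/set_settings wrappers collapse into the generic link_ksettings helpers. A sketch of the resulting wiring, valid for any phylib-backed driver:

	#include <linux/ethtool.h>
	#include <linux/phy.h>

	static const struct ethtool_ops sketch_ethtool_ops = {
		.get_link		= ethtool_op_get_link,
		.get_link_ksettings	= phy_ethtool_get_link_ksettings,
		.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	};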
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index deae10d7426d..5297bf77211c 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -467,8 +467,8 @@ static int cp_rx_poll(struct napi_struct *napi, int budget)
unsigned int rx_tail = cp->rx_tail;
int rx;
-rx_status_loop:
rx = 0;
+rx_status_loop:
cpw16(IntrStatus, cp_rx_intr_mask);
while (rx < budget) {
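The two-line 8139cp fix above moves the counter initialisation out from under the restart label: when the poll loop jumps back to rx_status_loop because more packets arrived, rx now keeps accumulating instead of resetting, so the function can no longer process more packets than NAPI budgeted. In miniature, with hypothetical process_rx() and rx_pending() helpers:

	#include <linux/netdevice.h>

	static int process_rx(struct napi_struct *napi, int quota);
	static bool rx_pending(struct napi_struct *napi);

	static int rx_poll_sketch(struct napi_struct *napi, int budget)
	{
		int rx = 0;		/* initialised once, not at the label */

	restart:
		rx += process_rx(napi, budget - rx);
		if (rx < budget && rx_pending(napi))
			goto restart;

		if (rx < budget)
			napi_complete(napi);
		return rx;
	}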
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index ef668d300800..da4c2d8a4173 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -1667,6 +1667,10 @@ static void rtl8139_tx_timeout_task (struct work_struct *work)
int i;
u8 tmp8;
+ napi_disable(&tp->napi);
+ netif_stop_queue(dev);
+ synchronize_sched();
+
netdev_dbg(dev, "Transmit timeout, status %02x %04x %04x media %02x\n",
RTL_R8(ChipCmd), RTL_R16(IntrStatus),
RTL_R16(IntrMask), RTL_R8(MediaStatus));
@@ -1696,10 +1700,10 @@ static void rtl8139_tx_timeout_task (struct work_struct *work)
spin_unlock_irq(&tp->lock);
/* ...and finally, reset everything */
- if (netif_running(dev)) {
- rtl8139_hw_start (dev);
- netif_wake_queue (dev);
- }
+ napi_enable(&tp->napi);
+ rtl8139_hw_start(dev);
+ netif_wake_queue(dev);
+
spin_unlock_bh(&tp->rx_lock);
}
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c
index d77d60ea8202..5cb96785fb63 100644
--- a/drivers/net/ethernet/realtek/atp.c
+++ b/drivers/net/ethernet/realtek/atp.c
@@ -544,7 +544,7 @@ static void tx_timeout(struct net_device *dev)
dev->stats.tx_errors++;
/* Try to restart the adapter. */
hardware_init(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
dev->stats.tx_errors++;
}
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 94f08f1e841c..e55638c7505a 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -345,7 +345,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
static int rx_buf_sz = 16383;
-static int use_dac;
+static int use_dac = -1;
static struct {
u32 msg_enable;
} debug = { -1 };
@@ -1749,13 +1749,21 @@ static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct rtl8169_private *tp = netdev_priv(dev);
+ struct device *d = &tp->pci_dev->dev;
+
+ pm_runtime_get_noresume(d);
rtl_lock_work(tp);
wol->supported = WAKE_ANY;
- wol->wolopts = __rtl8169_get_wol(tp);
+ if (pm_runtime_active(d))
+ wol->wolopts = __rtl8169_get_wol(tp);
+ else
+ wol->wolopts = tp->saved_wolopts;
rtl_unlock_work(tp);
+
+ pm_runtime_put_noidle(d);
}
static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
@@ -1845,6 +1853,9 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
struct rtl8169_private *tp = netdev_priv(dev);
+ struct device *d = &tp->pci_dev->dev;
+
+ pm_runtime_get_noresume(d);
rtl_lock_work(tp);
@@ -1852,12 +1863,17 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
tp->features |= RTL_FEATURE_WOL;
else
tp->features &= ~RTL_FEATURE_WOL;
- __rtl8169_set_wol(tp, wol->wolopts);
+ if (pm_runtime_active(d))
+ __rtl8169_set_wol(tp, wol->wolopts);
+ else
+ tp->saved_wolopts = wol->wolopts;
rtl_unlock_work(tp);
device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
+ pm_runtime_put_noidle(d);
+
return 0;
}
@@ -2292,11 +2308,17 @@ static void rtl8169_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct rtl8169_private *tp = netdev_priv(dev);
+ struct device *d = &tp->pci_dev->dev;
struct rtl8169_counters *counters = tp->counters;
ASSERT_RTNL();
- rtl8169_update_counters(dev);
+ pm_runtime_get_noresume(d);
+
+ if (pm_runtime_active(d))
+ rtl8169_update_counters(dev);
+
+ pm_runtime_put_noidle(d);
data[0] = le64_to_cpu(counters->tx_packets);
data[1] = le64_to_cpu(counters->rx_packets);
@@ -4458,6 +4480,7 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
static int rtl_set_mac_address(struct net_device *dev, void *p)
{
struct rtl8169_private *tp = netdev_priv(dev);
+ struct device *d = &tp->pci_dev->dev;
struct sockaddr *addr = p;
if (!is_valid_ether_addr(addr->sa_data))
@@ -4465,7 +4488,12 @@ static int rtl_set_mac_address(struct net_device *dev, void *p)
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
- rtl_rar_set(tp, dev->dev_addr);
+ pm_runtime_get_noresume(d);
+
+ if (pm_runtime_active(d))
+ rtl_rar_set(tp, dev->dev_addr);
+
+ pm_runtime_put_noidle(d);
return 0;
}
@@ -7868,6 +7896,7 @@ static int rtl8169_runtime_resume(struct device *device)
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
+ rtl_rar_set(tp, dev->dev_addr);
if (!tp->TxDescArray)
return 0;
@@ -8224,20 +8253,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_mwi_2;
}
- tp->cp_cmd = 0;
-
- if ((sizeof(dma_addr_t) > 4) &&
- !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
- tp->cp_cmd |= PCIDAC;
- dev->features |= NETIF_F_HIGHDMA;
- } else {
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (rc < 0) {
- netif_err(tp, probe, dev, "DMA configuration failed\n");
- goto err_out_free_res_3;
- }
- }
-
/* ioremap MMIO region */
ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
if (!ioaddr) {
@@ -8253,6 +8268,25 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Identify chip attached to board */
rtl8169_get_mac_version(tp, dev, cfg->default_ver);
+ tp->cp_cmd = 0;
+
+ if ((sizeof(dma_addr_t) > 4) &&
+ (use_dac == 1 || (use_dac == -1 && pci_is_pcie(pdev) &&
+ tp->mac_version >= RTL_GIGA_MAC_VER_18)) &&
+ !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+
+ /* CPlusCmd Dual Access Cycle is only needed for non-PCIe */
+ if (!pci_is_pcie(pdev))
+ tp->cp_cmd |= PCIDAC;
+ dev->features |= NETIF_F_HIGHDMA;
+ } else {
+ rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (rc < 0) {
+ netif_err(tp, probe, dev, "DMA configuration failed\n");
+ goto err_out_unmap_4;
+ }
+ }
+
rtl_init_rxcfg(tp);
rtl_irq_disable(tp);
@@ -8412,12 +8446,12 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
&tp->counters_phys_addr, GFP_KERNEL);
if (!tp->counters) {
rc = -ENOMEM;
- goto err_out_msi_4;
+ goto err_out_msi_5;
}
rc = register_netdev(dev);
if (rc < 0)
- goto err_out_cnt_5;
+ goto err_out_cnt_6;
pci_set_drvdata(pdev, dev);
@@ -8451,12 +8485,13 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
out:
return rc;
-err_out_cnt_5:
+err_out_cnt_6:
dma_free_coherent(&pdev->dev, sizeof(*tp->counters), tp->counters,
tp->counters_phys_addr);
-err_out_msi_4:
+err_out_msi_5:
netif_napi_del(&tp->napi);
rtl_disable_msi(pdev, tp);
+err_out_unmap_4:
iounmap(ioaddr);
err_out_free_res_3:
pci_release_regions(pdev);
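The r8169 hunks above wrap ethtool and address-setting entry points in a runtime-PM guard: pin the current power state without waking the device, touch registers only if it is actually active, and otherwise fall back to cached state (saved_wolopts; the MAC address is rewritten in runtime_resume). A sketch of the guard, with a hypothetical read_wol_from_hw():

	#include <linux/device.h>
	#include <linux/pm_runtime.h>

	static u32 read_wol_from_hw(struct device *d);

	static u32 get_wol_sketch(struct device *d, u32 cached_wolopts)
	{
		u32 wolopts;

		pm_runtime_get_noresume(d);	/* hold state, don't wake it */
		if (pm_runtime_active(d))
			wolopts = read_wol_from_hw(d);
		else
			wolopts = cached_wolopts;	/* device is off */
		pm_runtime_put_noidle(d);

		return wolopts;
	}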
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index b2160d1b9c71..4e5d5e953e15 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -157,6 +157,7 @@ enum ravb_reg {
TIC = 0x0378,
TIS = 0x037C,
ISS = 0x0380,
+ CIE = 0x0384, /* R-Car Gen3 only */
GCCR = 0x0390,
GMTT = 0x0394,
GPTC = 0x0398,
@@ -170,6 +171,15 @@ enum ravb_reg {
GCT0 = 0x03B8,
GCT1 = 0x03BC,
GCT2 = 0x03C0,
+ GIE = 0x03CC, /* R-Car Gen3 only */
+ GID = 0x03D0, /* R-Car Gen3 only */
+ DIL = 0x0440, /* R-Car Gen3 only */
+ RIE0 = 0x0460, /* R-Car Gen3 only */
+ RID0 = 0x0464, /* R-Car Gen3 only */
+ RIE2 = 0x0470, /* R-Car Gen3 only */
+ RID2 = 0x0474, /* R-Car Gen3 only */
+ TIE = 0x0478, /* R-Car Gen3 only */
+ TID = 0x047c, /* R-Car Gen3 only */
/* E-MAC registers */
ECMR = 0x0500,
@@ -556,6 +566,16 @@ enum ISS_BIT {
ISS_DPS15 = 0x80000000,
};
+/* CIE (R-Car Gen3 only) */
+enum CIE_BIT {
+ CIE_CRIE = 0x00000001,
+ CIE_CTIE = 0x00000100,
+ CIE_RQFM = 0x00010000,
+ CIE_CL0M = 0x00020000,
+ CIE_RFWL = 0x00040000,
+ CIE_RFFL = 0x00080000,
+};
+
/* GCCR */
enum GCCR_BIT {
GCCR_TCR = 0x00000003,
@@ -592,6 +612,188 @@ enum GIS_BIT {
GIS_PTMF = 0x00000004,
};
+/* GIE (R-Car Gen3 only) */
+enum GIE_BIT {
+ GIE_PTCS = 0x00000001,
+ GIE_PTOS = 0x00000002,
+ GIE_PTMS0 = 0x00000004,
+ GIE_PTMS1 = 0x00000008,
+ GIE_PTMS2 = 0x00000010,
+ GIE_PTMS3 = 0x00000020,
+ GIE_PTMS4 = 0x00000040,
+ GIE_PTMS5 = 0x00000080,
+ GIE_PTMS6 = 0x00000100,
+ GIE_PTMS7 = 0x00000200,
+ GIE_ATCS0 = 0x00010000,
+ GIE_ATCS1 = 0x00020000,
+ GIE_ATCS2 = 0x00040000,
+ GIE_ATCS3 = 0x00080000,
+ GIE_ATCS4 = 0x00100000,
+ GIE_ATCS5 = 0x00200000,
+ GIE_ATCS6 = 0x00400000,
+ GIE_ATCS7 = 0x00800000,
+ GIE_ATCS8 = 0x01000000,
+ GIE_ATCS9 = 0x02000000,
+ GIE_ATCS10 = 0x04000000,
+ GIE_ATCS11 = 0x08000000,
+ GIE_ATCS12 = 0x10000000,
+ GIE_ATCS13 = 0x20000000,
+ GIE_ATCS14 = 0x40000000,
+ GIE_ATCS15 = 0x80000000,
+};
+
+/* GID (R-Car Gen3 only) */
+enum GID_BIT {
+ GID_PTCD = 0x00000001,
+ GID_PTOD = 0x00000002,
+ GID_PTMD0 = 0x00000004,
+ GID_PTMD1 = 0x00000008,
+ GID_PTMD2 = 0x00000010,
+ GID_PTMD3 = 0x00000020,
+ GID_PTMD4 = 0x00000040,
+ GID_PTMD5 = 0x00000080,
+ GID_PTMD6 = 0x00000100,
+ GID_PTMD7 = 0x00000200,
+ GID_ATCD0 = 0x00010000,
+ GID_ATCD1 = 0x00020000,
+ GID_ATCD2 = 0x00040000,
+ GID_ATCD3 = 0x00080000,
+ GID_ATCD4 = 0x00100000,
+ GID_ATCD5 = 0x00200000,
+ GID_ATCD6 = 0x00400000,
+ GID_ATCD7 = 0x00800000,
+ GID_ATCD8 = 0x01000000,
+ GID_ATCD9 = 0x02000000,
+ GID_ATCD10 = 0x04000000,
+ GID_ATCD11 = 0x08000000,
+ GID_ATCD12 = 0x10000000,
+ GID_ATCD13 = 0x20000000,
+ GID_ATCD14 = 0x40000000,
+ GID_ATCD15 = 0x80000000,
+};
+
+/* RIE0 (R-Car Gen3 only) */
+enum RIE0_BIT {
+ RIE0_FRS0 = 0x00000001,
+ RIE0_FRS1 = 0x00000002,
+ RIE0_FRS2 = 0x00000004,
+ RIE0_FRS3 = 0x00000008,
+ RIE0_FRS4 = 0x00000010,
+ RIE0_FRS5 = 0x00000020,
+ RIE0_FRS6 = 0x00000040,
+ RIE0_FRS7 = 0x00000080,
+ RIE0_FRS8 = 0x00000100,
+ RIE0_FRS9 = 0x00000200,
+ RIE0_FRS10 = 0x00000400,
+ RIE0_FRS11 = 0x00000800,
+ RIE0_FRS12 = 0x00001000,
+ RIE0_FRS13 = 0x00002000,
+ RIE0_FRS14 = 0x00004000,
+ RIE0_FRS15 = 0x00008000,
+ RIE0_FRS16 = 0x00010000,
+ RIE0_FRS17 = 0x00020000,
+};
+
+/* RID0 (R-Car Gen3 only) */
+enum RID0_BIT {
+ RID0_FRD0 = 0x00000001,
+ RID0_FRD1 = 0x00000002,
+ RID0_FRD2 = 0x00000004,
+ RID0_FRD3 = 0x00000008,
+ RID0_FRD4 = 0x00000010,
+ RID0_FRD5 = 0x00000020,
+ RID0_FRD6 = 0x00000040,
+ RID0_FRD7 = 0x00000080,
+ RID0_FRD8 = 0x00000100,
+ RID0_FRD9 = 0x00000200,
+ RID0_FRD10 = 0x00000400,
+ RID0_FRD11 = 0x00000800,
+ RID0_FRD12 = 0x00001000,
+ RID0_FRD13 = 0x00002000,
+ RID0_FRD14 = 0x00004000,
+ RID0_FRD15 = 0x00008000,
+ RID0_FRD16 = 0x00010000,
+ RID0_FRD17 = 0x00020000,
+};
+
+/* RIE2 (R-Car Gen3 only) */
+enum RIE2_BIT {
+ RIE2_QFS0 = 0x00000001,
+ RIE2_QFS1 = 0x00000002,
+ RIE2_QFS2 = 0x00000004,
+ RIE2_QFS3 = 0x00000008,
+ RIE2_QFS4 = 0x00000010,
+ RIE2_QFS5 = 0x00000020,
+ RIE2_QFS6 = 0x00000040,
+ RIE2_QFS7 = 0x00000080,
+ RIE2_QFS8 = 0x00000100,
+ RIE2_QFS9 = 0x00000200,
+ RIE2_QFS10 = 0x00000400,
+ RIE2_QFS11 = 0x00000800,
+ RIE2_QFS12 = 0x00001000,
+ RIE2_QFS13 = 0x00002000,
+ RIE2_QFS14 = 0x00004000,
+ RIE2_QFS15 = 0x00008000,
+ RIE2_QFS16 = 0x00010000,
+ RIE2_QFS17 = 0x00020000,
+ RIE2_RFFS = 0x80000000,
+};
+
+/* RID2 (R-Car Gen3 only) */
+enum RID2_BIT {
+ RID2_QFD0 = 0x00000001,
+ RID2_QFD1 = 0x00000002,
+ RID2_QFD2 = 0x00000004,
+ RID2_QFD3 = 0x00000008,
+ RID2_QFD4 = 0x00000010,
+ RID2_QFD5 = 0x00000020,
+ RID2_QFD6 = 0x00000040,
+ RID2_QFD7 = 0x00000080,
+ RID2_QFD8 = 0x00000100,
+ RID2_QFD9 = 0x00000200,
+ RID2_QFD10 = 0x00000400,
+ RID2_QFD11 = 0x00000800,
+ RID2_QFD12 = 0x00001000,
+ RID2_QFD13 = 0x00002000,
+ RID2_QFD14 = 0x00004000,
+ RID2_QFD15 = 0x00008000,
+ RID2_QFD16 = 0x00010000,
+ RID2_QFD17 = 0x00020000,
+ RID2_RFFD = 0x80000000,
+};
+
+/* TIE (R-Car Gen3 only) */
+enum TIE_BIT {
+ TIE_FTS0 = 0x00000001,
+ TIE_FTS1 = 0x00000002,
+ TIE_FTS2 = 0x00000004,
+ TIE_FTS3 = 0x00000008,
+ TIE_TFUS = 0x00000100,
+ TIE_TFWS = 0x00000200,
+ TIE_MFUS = 0x00000400,
+ TIE_MFWS = 0x00000800,
+ TIE_TDPS0 = 0x00010000,
+ TIE_TDPS1 = 0x00020000,
+ TIE_TDPS2 = 0x00040000,
+ TIE_TDPS3 = 0x00080000,
+};
+
+/* TID (R-Car Gen3 only) */
+enum TID_BIT {
+ TID_FTD0 = 0x00000001,
+ TID_FTD1 = 0x00000002,
+ TID_FTD2 = 0x00000004,
+ TID_FTD3 = 0x00000008,
+ TID_TFUD = 0x00000100,
+ TID_TFWD = 0x00000200,
+ TID_MFUD = 0x00000400,
+ TID_MFWD = 0x00000800,
+ TID_TDPD0 = 0x00010000,
+ TID_TDPD1 = 0x00020000,
+ TID_TDPD2 = 0x00040000,
+ TID_TDPD3 = 0x00080000,
+};
+
/* ECMR */
enum ECMR_BIT {
ECMR_PRM = 0x00000001,
@@ -817,6 +1019,8 @@ struct ravb_private {
int duplex;
int emac_irq;
enum ravb_chip_id chip_id;
+ int rx_irqs[NUM_RX_QUEUE];
+ int tx_irqs[NUM_TX_QUEUE];
unsigned no_avb_link:1;
unsigned avb_link_active_low:1;
@@ -841,7 +1045,7 @@ void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear,
u32 set);
int ravb_wait(struct net_device *ndev, enum ravb_reg reg, u32 mask, u32 value);
-irqreturn_t ravb_ptp_interrupt(struct net_device *ndev);
+void ravb_ptp_interrupt(struct net_device *ndev);
void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev);
void ravb_ptp_stop(struct net_device *ndev);
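The register block added above underpins the Gen3 interrupt handling later in this file: each enable register (GIE, RIE0, RIE2, TIE) has a matching disable register (GID, RID0, RID2, TID), both acting on the bits written, so masking a source is a single write with no read-modify-write race. A sketch of the two styles side by side, using the ravb_read/ravb_write accessors from this header:

	/* Mask RX and TX interrupts for queue q. */
	static void mask_queue_irqs(struct net_device *ndev, int q, bool gen3)
	{
		if (!gen3) {
			/* Gen2: shared enable registers, read-modify-write. */
			ravb_write(ndev, ravb_read(ndev, RIC0) & ~BIT(q), RIC0);
			ravb_write(ndev, ravb_read(ndev, TIC) & ~BIT(q), TIC);
		} else {
			/* Gen3: dedicated disable registers, plain writes. */
			ravb_write(ndev, BIT(q), RID0);
			ravb_write(ndev, BIT(q), TID);
		}
	}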
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 4277d0c12101..1e1cc0fad17f 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -42,6 +42,16 @@
NETIF_MSG_RX_ERR | \
NETIF_MSG_TX_ERR)
+static const char *ravb_rx_irqs[NUM_RX_QUEUE] = {
+ "ch0", /* RAVB_BE */
+ "ch1", /* RAVB_NC */
+};
+
+static const char *ravb_tx_irqs[NUM_TX_QUEUE] = {
+ "ch18", /* RAVB_BE */
+ "ch19", /* RAVB_NC */
+};
+
void ravb_modify(struct net_device *ndev, enum ravb_reg reg, u32 clear,
u32 set)
{
@@ -236,10 +246,9 @@ static void ravb_ring_format(struct net_device *ndev, int q)
for (i = 0; i < priv->num_rx_ring[q]; i++) {
/* RX descriptor */
rx_desc = &priv->rx_ring[q][i];
- /* The size of the buffer should be on 16-byte boundary. */
- rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
+ rx_desc->ds_cc = cpu_to_le16(PKT_BUF_SZ);
dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
- ALIGN(PKT_BUF_SZ, 16),
+ PKT_BUF_SZ,
DMA_FROM_DEVICE);
/* We just set the data size to 0 for a failed mapping which
* should prevent DMA from happening...
@@ -353,8 +362,6 @@ static void ravb_emac_init(struct net_device *ndev)
ravb_write(ndev,
(ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
- ravb_write(ndev, 1, MPR);
-
/* E-MAC status register clear */
ravb_write(ndev, ECSR_ICD | ECSR_MPD, ECSR);
@@ -365,6 +372,7 @@ static void ravb_emac_init(struct net_device *ndev)
/* Device init function for Ethernet AVB */
static int ravb_dmac_init(struct net_device *ndev)
{
+ struct ravb_private *priv = netdev_priv(ndev);
int error;
/* Set CONFIG mode */
@@ -392,7 +400,8 @@ static int ravb_dmac_init(struct net_device *ndev)
#endif
/* Set AVB RX */
- ravb_write(ndev, RCR_EFFS | RCR_ENCF | RCR_ETS0 | 0x18000000, RCR);
+ ravb_write(ndev,
+ RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR);
/* Set FIFO size */
ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00222200, TGC);
@@ -401,6 +410,12 @@ static int ravb_dmac_init(struct net_device *ndev)
ravb_write(ndev, TCCR_TFEN, TCCR);
/* Interrupt init: */
+ if (priv->chip_id == RCAR_GEN3) {
+ /* Clear DIL.DPLx */
+ ravb_write(ndev, 0, DIL);
+ /* Set queue specific interrupt */
+ ravb_write(ndev, CIE_CRIE | CIE_CTIE | CIE_CL0M, CIE);
+ }
/* Frame receive */
ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
/* Disable FIFO full warning */
@@ -541,7 +556,7 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
skb = priv->rx_skb[q][entry];
priv->rx_skb[q][entry] = NULL;
dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
- ALIGN(PKT_BUF_SZ, 16),
+ PKT_BUF_SZ,
DMA_FROM_DEVICE);
get_ts &= (q == RAVB_NC) ?
RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
@@ -571,8 +586,7 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
desc = &priv->rx_ring[q][entry];
- /* The size of the buffer should be on 16-byte boundary. */
- desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
+ desc->ds_cc = cpu_to_le16(PKT_BUF_SZ);
if (!priv->rx_skb[q][entry]) {
skb = netdev_alloc_skb(ndev,
@@ -643,7 +657,7 @@ static int ravb_stop_dma(struct net_device *ndev)
}
/* E-MAC interrupt handler */
-static void ravb_emac_interrupt(struct net_device *ndev)
+static void ravb_emac_interrupt_unlocked(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
u32 ecsr, psr;
@@ -669,6 +683,18 @@ static void ravb_emac_interrupt(struct net_device *ndev)
}
}
+static irqreturn_t ravb_emac_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ spin_lock(&priv->lock);
+ ravb_emac_interrupt_unlocked(ndev);
+ mmiowb();
+ spin_unlock(&priv->lock);
+ return IRQ_HANDLED;
+}
+
/* Error interrupt handler */
static void ravb_error_interrupt(struct net_device *ndev)
{
@@ -695,6 +721,50 @@ static void ravb_error_interrupt(struct net_device *ndev)
}
}
+static bool ravb_queue_interrupt(struct net_device *ndev, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ u32 ris0 = ravb_read(ndev, RIS0);
+ u32 ric0 = ravb_read(ndev, RIC0);
+ u32 tis = ravb_read(ndev, TIS);
+ u32 tic = ravb_read(ndev, TIC);
+
+ if (((ris0 & ric0) & BIT(q)) || ((tis & tic) & BIT(q))) {
+ if (napi_schedule_prep(&priv->napi[q])) {
+ /* Mask RX and TX interrupts */
+ if (priv->chip_id == RCAR_GEN2) {
+ ravb_write(ndev, ric0 & ~BIT(q), RIC0);
+ ravb_write(ndev, tic & ~BIT(q), TIC);
+ } else {
+ ravb_write(ndev, BIT(q), RID0);
+ ravb_write(ndev, BIT(q), TID);
+ }
+ __napi_schedule(&priv->napi[q]);
+ } else {
+ netdev_warn(ndev,
+ "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n",
+ ris0, ric0);
+ netdev_warn(ndev,
+ " tx status 0x%08x, tx mask 0x%08x.\n",
+ tis, tic);
+ }
+ return true;
+ }
+ return false;
+}
+
+static bool ravb_timestamp_interrupt(struct net_device *ndev)
+{
+ u32 tis = ravb_read(ndev, TIS);
+
+ if (tis & TIS_TFUF) {
+ ravb_write(ndev, ~TIS_TFUF, TIS);
+ ravb_get_tx_tstamp(ndev);
+ return true;
+ }
+ return false;
+}
+
static irqreturn_t ravb_interrupt(int irq, void *dev_id)
{
struct net_device *ndev = dev_id;
@@ -708,63 +778,102 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id)
/* Received and transmitted interrupts */
if (iss & (ISS_FRS | ISS_FTS | ISS_TFUS)) {
- u32 ris0 = ravb_read(ndev, RIS0);
- u32 ric0 = ravb_read(ndev, RIC0);
- u32 tis = ravb_read(ndev, TIS);
- u32 tic = ravb_read(ndev, TIC);
int q;
/* Timestamp updated */
- if (tis & TIS_TFUF) {
- ravb_write(ndev, ~TIS_TFUF, TIS);
- ravb_get_tx_tstamp(ndev);
+ if (ravb_timestamp_interrupt(ndev))
result = IRQ_HANDLED;
- }
/* Network control and best effort queue RX/TX */
for (q = RAVB_NC; q >= RAVB_BE; q--) {
- if (((ris0 & ric0) & BIT(q)) ||
- ((tis & tic) & BIT(q))) {
- if (napi_schedule_prep(&priv->napi[q])) {
- /* Mask RX and TX interrupts */
- ric0 &= ~BIT(q);
- tic &= ~BIT(q);
- ravb_write(ndev, ric0, RIC0);
- ravb_write(ndev, tic, TIC);
- __napi_schedule(&priv->napi[q]);
- } else {
- netdev_warn(ndev,
- "ignoring interrupt, rx status 0x%08x, rx mask 0x%08x,\n",
- ris0, ric0);
- netdev_warn(ndev,
- " tx status 0x%08x, tx mask 0x%08x.\n",
- tis, tic);
- }
+ if (ravb_queue_interrupt(ndev, q))
result = IRQ_HANDLED;
- }
}
}
/* E-MAC status summary */
if (iss & ISS_MS) {
- ravb_emac_interrupt(ndev);
+ ravb_emac_interrupt_unlocked(ndev);
+ result = IRQ_HANDLED;
+ }
+
+ /* Error status summary */
+ if (iss & ISS_ES) {
+ ravb_error_interrupt(ndev);
result = IRQ_HANDLED;
}
+ /* gPTP interrupt status summary */
+ if (iss & ISS_CGIS) {
+ ravb_ptp_interrupt(ndev);
+ result = IRQ_HANDLED;
+ }
+
+ mmiowb();
+ spin_unlock(&priv->lock);
+ return result;
+}
+
+/* Timestamp/Error/gPTP interrupt handler */
+static irqreturn_t ravb_multi_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = dev_id;
+ struct ravb_private *priv = netdev_priv(ndev);
+ irqreturn_t result = IRQ_NONE;
+ u32 iss;
+
+ spin_lock(&priv->lock);
+ /* Get interrupt status */
+ iss = ravb_read(ndev, ISS);
+
+ /* Timestamp updated */
+ if ((iss & ISS_TFUS) && ravb_timestamp_interrupt(ndev))
+ result = IRQ_HANDLED;
+
/* Error status summary */
if (iss & ISS_ES) {
ravb_error_interrupt(ndev);
result = IRQ_HANDLED;
}
- if ((iss & ISS_CGIS) && ravb_ptp_interrupt(ndev) == IRQ_HANDLED)
+ /* gPTP interrupt status summary */
+ if (iss & ISS_CGIS) {
+ ravb_ptp_interrupt(ndev);
result = IRQ_HANDLED;
+ }
mmiowb();
spin_unlock(&priv->lock);
return result;
}
+static irqreturn_t ravb_dma_interrupt(int irq, void *dev_id, int q)
+{
+ struct net_device *ndev = dev_id;
+ struct ravb_private *priv = netdev_priv(ndev);
+ irqreturn_t result = IRQ_NONE;
+
+ spin_lock(&priv->lock);
+
+ /* Network control/Best effort queue RX/TX */
+ if (ravb_queue_interrupt(ndev, q))
+ result = IRQ_HANDLED;
+
+ mmiowb();
+ spin_unlock(&priv->lock);
+ return result;
+}
+
+static irqreturn_t ravb_be_interrupt(int irq, void *dev_id)
+{
+ return ravb_dma_interrupt(irq, dev_id, RAVB_BE);
+}
+
+static irqreturn_t ravb_nc_interrupt(int irq, void *dev_id)
+{
+ return ravb_dma_interrupt(irq, dev_id, RAVB_NC);
+}
+
static int ravb_poll(struct napi_struct *napi, int budget)
{
struct net_device *ndev = napi->dev;
@@ -804,8 +913,13 @@ static int ravb_poll(struct napi_struct *napi, int budget)
/* Re-enable RX/TX interrupts */
spin_lock_irqsave(&priv->lock, flags);
- ravb_modify(ndev, RIC0, mask, mask);
- ravb_modify(ndev, TIC, mask, mask);
+ if (priv->chip_id == RCAR_GEN2) {
+ ravb_modify(ndev, RIC0, mask, mask);
+ ravb_modify(ndev, TIC, mask, mask);
+ } else {
+ ravb_write(ndev, mask, RIE0);
+ ravb_write(ndev, mask, TIE);
+ }
mmiowb();
spin_unlock_irqrestore(&priv->lock, flags);
@@ -891,6 +1005,7 @@ static int ravb_phy_init(struct net_device *ndev)
}
phydev = of_phy_connect(ndev, pn, ravb_adjust_link, 0,
priv->phy_interface);
+ of_node_put(pn);
if (!phydev) {
netdev_err(ndev, "failed to connect PHY\n");
return -ENOENT;
@@ -1208,35 +1323,72 @@ static const struct ethtool_ops ravb_ethtool_ops = {
.get_ts_info = ravb_get_ts_info,
};
+static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler,
+ struct net_device *ndev, struct device *dev,
+ const char *ch)
+{
+ char *name;
+ int error;
+
+ name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch);
+ if (!name)
+ return -ENOMEM;
+ error = request_irq(irq, handler, 0, name, ndev);
+ if (error)
+ netdev_err(ndev, "cannot request IRQ %s\n", name);
+
+ return error;
+}
+
/* Network device open function for Ethernet AVB */
static int ravb_open(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
+ struct platform_device *pdev = priv->pdev;
+ struct device *dev = &pdev->dev;
int error;
napi_enable(&priv->napi[RAVB_BE]);
napi_enable(&priv->napi[RAVB_NC]);
- error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED, ndev->name,
- ndev);
- if (error) {
- netdev_err(ndev, "cannot request IRQ\n");
- goto out_napi_off;
- }
-
- if (priv->chip_id == RCAR_GEN3) {
- error = request_irq(priv->emac_irq, ravb_interrupt,
- IRQF_SHARED, ndev->name, ndev);
+ if (priv->chip_id == RCAR_GEN2) {
+ error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED,
+ ndev->name, ndev);
if (error) {
netdev_err(ndev, "cannot request IRQ\n");
- goto out_free_irq;
+ goto out_napi_off;
}
+ } else {
+ error = ravb_hook_irq(ndev->irq, ravb_multi_interrupt, ndev,
+ dev, "ch22:multi");
+ if (error)
+ goto out_napi_off;
+ error = ravb_hook_irq(priv->emac_irq, ravb_emac_interrupt, ndev,
+ dev, "ch24:emac");
+ if (error)
+ goto out_free_irq;
+ error = ravb_hook_irq(priv->rx_irqs[RAVB_BE], ravb_be_interrupt,
+ ndev, dev, "ch0:rx_be");
+ if (error)
+ goto out_free_irq_emac;
+ error = ravb_hook_irq(priv->tx_irqs[RAVB_BE], ravb_be_interrupt,
+ ndev, dev, "ch18:tx_be");
+ if (error)
+ goto out_free_irq_be_rx;
+ error = ravb_hook_irq(priv->rx_irqs[RAVB_NC], ravb_nc_interrupt,
+ ndev, dev, "ch1:rx_nc");
+ if (error)
+ goto out_free_irq_be_tx;
+ error = ravb_hook_irq(priv->tx_irqs[RAVB_NC], ravb_nc_interrupt,
+ ndev, dev, "ch19:tx_nc");
+ if (error)
+ goto out_free_irq_nc_rx;
}
/* Device init */
error = ravb_dmac_init(ndev);
if (error)
- goto out_free_irq2;
+ goto out_free_irq_nc_tx;
ravb_emac_init(ndev);
/* Initialise PTP Clock driver */
@@ -1256,9 +1408,18 @@ out_ptp_stop:
/* Stop PTP Clock driver */
if (priv->chip_id == RCAR_GEN2)
ravb_ptp_stop(ndev);
-out_free_irq2:
- if (priv->chip_id == RCAR_GEN3)
- free_irq(priv->emac_irq, ndev);
+out_free_irq_nc_tx:
+ if (priv->chip_id == RCAR_GEN2)
+ goto out_free_irq;
+ free_irq(priv->tx_irqs[RAVB_NC], ndev);
+out_free_irq_nc_rx:
+ free_irq(priv->rx_irqs[RAVB_NC], ndev);
+out_free_irq_be_tx:
+ free_irq(priv->tx_irqs[RAVB_BE], ndev);
+out_free_irq_be_rx:
+ free_irq(priv->rx_irqs[RAVB_BE], ndev);
+out_free_irq_emac:
+ free_irq(priv->emac_irq, ndev);
out_free_irq:
free_irq(ndev->irq, ndev);
out_napi_off:
@@ -1506,8 +1667,13 @@ static int ravb_close(struct net_device *ndev)
priv->phydev = NULL;
}
- if (priv->chip_id == RCAR_GEN3)
+ if (priv->chip_id != RCAR_GEN2) {
+ free_irq(priv->tx_irqs[RAVB_NC], ndev);
+ free_irq(priv->rx_irqs[RAVB_NC], ndev);
+ free_irq(priv->tx_irqs[RAVB_BE], ndev);
+ free_irq(priv->rx_irqs[RAVB_BE], ndev);
free_irq(priv->emac_irq, ndev);
+ }
free_irq(ndev->irq, ndev);
napi_disable(&priv->napi[RAVB_NC]);
@@ -1718,6 +1884,7 @@ static int ravb_probe(struct platform_device *pdev)
struct net_device *ndev;
int error, irq, q;
struct resource *res;
+ int i;
if (!np) {
dev_err(&pdev->dev,
@@ -1742,7 +1909,6 @@ static int ravb_probe(struct platform_device *pdev)
/* The Ether-specific entries in the device structure. */
ndev->base_addr = res->start;
- ndev->dma = -1;
chip_id = (enum ravb_chip_id)of_device_get_match_data(&pdev->dev);
@@ -1787,6 +1953,22 @@ static int ravb_probe(struct platform_device *pdev)
goto out_release;
}
priv->emac_irq = irq;
+ for (i = 0; i < NUM_RX_QUEUE; i++) {
+ irq = platform_get_irq_byname(pdev, ravb_rx_irqs[i]);
+ if (irq < 0) {
+ error = irq;
+ goto out_release;
+ }
+ priv->rx_irqs[i] = irq;
+ }
+ for (i = 0; i < NUM_TX_QUEUE; i++) {
+ irq = platform_get_irq_byname(pdev, ravb_tx_irqs[i]);
+ if (irq < 0) {
+ error = irq;
+ goto out_release;
+ }
+ priv->tx_irqs[i] = irq;
+ }
}
priv->chip_id = chip_id;
@@ -1928,8 +2110,7 @@ static int ravb_runtime_nop(struct device *dev)
}
static const struct dev_pm_ops ravb_dev_pm_ops = {
- .runtime_suspend = ravb_runtime_nop,
- .runtime_resume = ravb_runtime_nop,
+ SET_RUNTIME_PM_OPS(ravb_runtime_nop, ravb_runtime_nop, NULL)
};
#define RAVB_PM_OPS (&ravb_dev_pm_ops)
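ravb_open() above requests six named IRQs on Gen3; the helper composes each name with devm_kasprintf() because request_irq() keeps the name pointer rather than copying the string, so the name must live at least as long as the IRQ, and device-managed allocation ties its lifetime to the device. Distilled:

	#include <linux/device.h>
	#include <linux/interrupt.h>
	#include <linux/netdevice.h>

	static int hook_named_irq(struct device *dev, struct net_device *ndev,
				  unsigned int irq, irq_handler_t handler,
				  const char *ch)
	{
		/* devm storage: freed with the device, after free_irq(). */
		char *name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s",
					    ndev->name, ch);

		if (!name)
			return -ENOMEM;
		return request_irq(irq, handler, 0, name, ndev);
	}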
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c
index 57992ccc4657..eede70ec37f8 100644
--- a/drivers/net/ethernet/renesas/ravb_ptp.c
+++ b/drivers/net/ethernet/renesas/ravb_ptp.c
@@ -194,7 +194,12 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp,
priv->ptp.extts[req->index] = on;
spin_lock_irqsave(&priv->lock, flags);
- ravb_modify(ndev, GIC, GIC_PTCE, on ? GIC_PTCE : 0);
+ if (priv->chip_id == RCAR_GEN2)
+ ravb_modify(ndev, GIC, GIC_PTCE, on ? GIC_PTCE : 0);
+ else if (on)
+ ravb_write(ndev, GIE_PTCS, GIE);
+ else
+ ravb_write(ndev, GID_PTCD, GID);
mmiowb();
spin_unlock_irqrestore(&priv->lock, flags);
@@ -241,7 +246,10 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp,
error = ravb_ptp_update_compare(priv, (u32)start_ns);
if (!error) {
/* Unmask interrupt */
- ravb_modify(ndev, GIC, GIC_PTME, GIC_PTME);
+ if (priv->chip_id == RCAR_GEN2)
+ ravb_modify(ndev, GIC, GIC_PTME, GIC_PTME);
+ else
+ ravb_write(ndev, GIE_PTMS0, GIE);
}
} else {
spin_lock_irqsave(&priv->lock, flags);
@@ -250,7 +258,10 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp,
perout->period = 0;
/* Mask interrupt */
- ravb_modify(ndev, GIC, GIC_PTME, 0);
+ if (priv->chip_id == RCAR_GEN2)
+ ravb_modify(ndev, GIC, GIC_PTME, 0);
+ else
+ ravb_write(ndev, GID_PTMD0, GID);
}
mmiowb();
spin_unlock_irqrestore(&priv->lock, flags);
@@ -285,7 +296,7 @@ static const struct ptp_clock_info ravb_ptp_info = {
};
/* Caller must hold the lock */
-irqreturn_t ravb_ptp_interrupt(struct net_device *ndev)
+void ravb_ptp_interrupt(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
u32 gis = ravb_read(ndev, GIS);
@@ -308,12 +319,7 @@ irqreturn_t ravb_ptp_interrupt(struct net_device *ndev)
}
}
- if (gis) {
- ravb_write(ndev, ~gis, GIS);
- return IRQ_HANDLED;
- }
-
- return IRQ_NONE;
+ ravb_write(ndev, ~gis, GIS);
}
void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev)
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index ceea74cc2229..054e795df90f 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -201,9 +201,14 @@ static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
[ARSTR] = 0x0000,
[TSU_CTRST] = 0x0004,
+ [TSU_FWSLC] = 0x0038,
[TSU_VTAG0] = 0x0058,
[TSU_ADSBSY] = 0x0060,
[TSU_TEN] = 0x0064,
+ [TSU_POST1] = 0x0070,
+ [TSU_POST2] = 0x0074,
+ [TSU_POST3] = 0x0078,
+ [TSU_POST4] = 0x007c,
[TSU_ADRH0] = 0x0100,
[TXNLCR0] = 0x0080,
@@ -482,7 +487,7 @@ static void sh_eth_chip_reset(struct net_device *ndev)
struct sh_eth_private *mdp = netdev_priv(ndev);
/* reset device */
- sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
+ sh_eth_tsu_write(mdp, ARSTR_ARST, ARSTR);
mdelay(1);
}
@@ -537,11 +542,7 @@ static struct sh_eth_cpu_data r7s72100_data = {
static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
{
- struct sh_eth_private *mdp = netdev_priv(ndev);
-
- /* reset device */
- sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
- mdelay(1);
+ sh_eth_chip_reset(ndev);
sh_eth_select_mii(ndev);
}
@@ -725,8 +726,8 @@ static struct sh_eth_cpu_data sh7757_data = {
#define GIGA_MAHR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
static void sh_eth_chip_reset_giga(struct net_device *ndev)
{
- int i;
u32 mahr[2], malr[2];
+ int i;
/* save MAHR and MALR */
for (i = 0; i < 2; i++) {
@@ -734,9 +735,7 @@ static void sh_eth_chip_reset_giga(struct net_device *ndev)
mahr[i] = ioread32((void *)GIGA_MAHR(i));
}
- /* reset device */
- iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));
- mdelay(1);
+ sh_eth_chip_reset(ndev);
/* restore MAHR and MALR */
for (i = 0; i < 2; i++) {
@@ -899,7 +898,7 @@ static int sh_eth_check_reset(struct net_device *ndev)
int cnt = 100;
while (cnt > 0) {
- if (!(sh_eth_read(ndev, EDMR) & 0x3))
+ if (!(sh_eth_read(ndev, EDMR) & EDMR_SRST_GETHER))
break;
mdelay(1);
cnt--;
@@ -1229,7 +1228,7 @@ ring_free:
return -ENOMEM;
}
-static int sh_eth_dev_init(struct net_device *ndev, bool start)
+static int sh_eth_dev_init(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
int ret;
@@ -1279,10 +1278,8 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
RFLR);
sh_eth_modify(ndev, EESR, 0, 0);
- if (start) {
- mdp->irq_enabled = true;
- sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
- }
+ mdp->irq_enabled = true;
+ sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
/* PAUSE Prohibition */
sh_eth_write(ndev, ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) |
@@ -1295,8 +1292,7 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
/* E-MAC Interrupt Enable register */
- if (start)
- sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
+ sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
/* Set MAC address */
update_mac_address(ndev);
@@ -1309,10 +1305,8 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
if (mdp->cd->tpauser)
sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);
- if (start) {
- /* Setting the Rx mode will start the Rx process. */
- sh_eth_write(ndev, EDRRR_R, EDRRR);
- }
+ /* Setting the Rx mode will start the Rx process. */
+ sh_eth_write(ndev, EDRRR_R, EDRRR);
return ret;
}
@@ -1791,6 +1785,7 @@ static int sh_eth_phy_init(struct net_device *ndev)
sh_eth_adjust_link, 0,
mdp->phy_interface);
+ of_node_put(pn);
if (!phydev)
phydev = ERR_PTR(-ENOENT);
} else {
@@ -2194,7 +2189,7 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
__func__);
return ret;
}
- ret = sh_eth_dev_init(ndev, true);
+ ret = sh_eth_dev_init(ndev);
if (ret < 0) {
netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
__func__);
@@ -2246,7 +2241,7 @@ static int sh_eth_open(struct net_device *ndev)
goto out_free_irq;
/* device init */
- ret = sh_eth_dev_init(ndev, true);
+ ret = sh_eth_dev_init(ndev);
if (ret)
goto out_free_irq;
@@ -2299,7 +2294,7 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
}
/* device init */
- sh_eth_dev_init(ndev, true);
+ sh_eth_dev_init(ndev);
netif_start_queue(ndev);
}
@@ -2796,6 +2791,8 @@ static void sh_eth_tsu_init(struct sh_eth_private *mdp)
{
if (sh_eth_is_rz_fast_ether(mdp)) {
sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
+ sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL,
+ TSU_FWSLC); /* Enable POST registers */
return;
}
@@ -3007,7 +3004,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
if (devno < 0)
devno = 0;
- ndev->dma = -1;
ret = platform_get_irq(pdev, 0);
if (ret < 0)
goto out_release;
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 8fa4ef3a7fdd..c62380e34a1d 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -394,7 +394,7 @@ enum RPADIR_BIT {
#define DEFAULT_FDR_INIT 0x00000707
/* ARSTR */
-enum ARSTR_BIT { ARSTR_ARSTR = 0x00000001, };
+enum ARSTR_BIT { ARSTR_ARST = 0x00000001, };
/* TSU_FWEN0 */
enum TSU_FWEN0_BIT {
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 28b775e5a9ad..f0b09b05ed3f 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -1996,7 +1996,8 @@ static int rocker_port_change_proto_down(struct net_device *dev,
return 0;
}
-static void rocker_port_neigh_destroy(struct neighbour *n)
+static void rocker_port_neigh_destroy(struct net_device *dev,
+ struct neighbour *n)
{
struct rocker_port *rocker_port = netdev_priv(n->dev);
int err;
diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c
index 0e758bcb26b0..1ca796316173 100644
--- a/drivers/net/ethernet/rocker/rocker_ofdpa.c
+++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c
@@ -2727,7 +2727,7 @@ static int ofdpa_port_obj_fib4_add(struct rocker_port *rocker_port,
return ofdpa_port_fib_ipv4(ofdpa_port, trans,
htonl(fib4->dst), fib4->dst_len,
- &fib4->fi, fib4->tb_id, 0);
+ fib4->fi, fib4->tb_id, 0);
}
static int ofdpa_port_obj_fib4_del(struct rocker_port *rocker_port,
@@ -2737,7 +2737,7 @@ static int ofdpa_port_obj_fib4_del(struct rocker_port *rocker_port,
return ofdpa_port_fib_ipv4(ofdpa_port, NULL,
htonl(fib4->dst), fib4->dst_len,
- &fib4->fi, fib4->tb_id,
+ fib4->fi, fib4->tb_id,
OFDPA_OP_FLAG_REMOVE);
}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
index 45019649bbbd..5cb51b609f02 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
@@ -475,7 +475,6 @@ struct sxgbe_priv_data {
int rxcsum_insertion;
spinlock_t stats_lock; /* lock for tx/rx statistics */
- struct phy_device *phydev;
int oldlink;
int speed;
int oldduplex;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
index c0981ae45874..542b67d436df 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
@@ -147,7 +147,7 @@ static int sxgbe_get_eee(struct net_device *dev,
edata->eee_active = priv->eee_active;
edata->tx_lpi_timer = priv->tx_lpi_timer;
- return phy_ethtool_get_eee(priv->phydev, edata);
+ return phy_ethtool_get_eee(dev->phydev, edata);
}
static int sxgbe_set_eee(struct net_device *dev,
@@ -172,7 +172,7 @@ static int sxgbe_set_eee(struct net_device *dev,
priv->tx_lpi_timer = edata->tx_lpi_timer;
}
- return phy_ethtool_set_eee(priv->phydev, edata);
+ return phy_ethtool_set_eee(dev->phydev, edata);
}
static void sxgbe_getdrvinfo(struct net_device *dev,
@@ -182,27 +182,6 @@ static void sxgbe_getdrvinfo(struct net_device *dev,
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
-static int sxgbe_getsettings(struct net_device *dev,
- struct ethtool_cmd *cmd)
-{
- struct sxgbe_priv_data *priv = netdev_priv(dev);
-
- if (priv->phydev)
- return phy_ethtool_gset(priv->phydev, cmd);
-
- return -EOPNOTSUPP;
-}
-
-static int sxgbe_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct sxgbe_priv_data *priv = netdev_priv(dev);
-
- if (priv->phydev)
- return phy_ethtool_sset(priv->phydev, cmd);
-
- return -EOPNOTSUPP;
-}
-
static u32 sxgbe_getmsglevel(struct net_device *dev)
{
struct sxgbe_priv_data *priv = netdev_priv(dev);
@@ -255,7 +234,7 @@ static void sxgbe_get_ethtool_stats(struct net_device *dev,
char *p;
if (priv->eee_enabled) {
- int val = phy_get_eee_err(priv->phydev);
+ int val = phy_get_eee_err(dev->phydev);
if (val)
priv->xstats.eee_wakeup_error_n = val;
@@ -499,8 +478,6 @@ static int sxgbe_get_regs_len(struct net_device *dev)
static const struct ethtool_ops sxgbe_ethtool_ops = {
.get_drvinfo = sxgbe_getdrvinfo,
- .get_settings = sxgbe_getsettings,
- .set_settings = sxgbe_setsettings,
.get_msglevel = sxgbe_getmsglevel,
.set_msglevel = sxgbe_setmsglevel,
.get_link = ethtool_op_get_link,
@@ -516,6 +493,8 @@ static const struct ethtool_ops sxgbe_ethtool_ops = {
.get_regs_len = sxgbe_get_regs_len,
.get_eee = sxgbe_get_eee,
.set_eee = sxgbe_set_eee,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
void sxgbe_set_ethtool_ops(struct net_device *netdev)
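Dropping the driver-private gset/sset wrappers works because the PHY now lives in net_device->phydev, which is exactly what the generic phylib helpers operate on. A minimal sketch of the resulting pattern for any driver that attaches its PHY with phy_connect():

	static const struct ethtool_ops example_ethtool_ops = {
		.get_link		= ethtool_op_get_link,
		/* both helpers use dev->phydev and return -ENODEV if no PHY is attached */
		.get_link_ksettings	= phy_ethtool_get_link_ksettings,
		.set_link_ksettings	= phy_ethtool_set_link_ksettings,
	};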
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 413ea14ab91f..ea44a2456ce1 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -124,12 +124,13 @@ static void sxgbe_eee_ctrl_timer(unsigned long arg)
*/
bool sxgbe_eee_init(struct sxgbe_priv_data * const priv)
{
+ struct net_device *ndev = priv->dev;
bool ret = false;
/* MAC core supports the EEE feature. */
if (priv->hw_cap.eee) {
/* Check if the PHY supports EEE */
- if (phy_init_eee(priv->phydev, 1))
+ if (phy_init_eee(ndev->phydev, 1))
return false;
priv->eee_active = 1;
@@ -152,12 +153,14 @@ bool sxgbe_eee_init(struct sxgbe_priv_data * const priv)
static void sxgbe_eee_adjust(const struct sxgbe_priv_data *priv)
{
+ struct net_device *ndev = priv->dev;
+
/* When EEE has already been initialised we have to modify
* the PLS bit in the LPI ctrl & status reg according to the
* PHY link status.
*/
if (priv->eee_enabled)
- priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link);
+ priv->hw->mac->set_eee_pls(priv->ioaddr, ndev->phydev->link);
}
/**
@@ -203,7 +206,7 @@ static inline u32 sxgbe_tx_avail(struct sxgbe_tx_queue *queue, int tx_qsize)
static void sxgbe_adjust_link(struct net_device *dev)
{
struct sxgbe_priv_data *priv = netdev_priv(dev);
- struct phy_device *phydev = priv->phydev;
+ struct phy_device *phydev = dev->phydev;
u8 new_state = 0;
u8 speed = 0xff;
@@ -306,9 +309,6 @@ static int sxgbe_init_phy(struct net_device *ndev)
netdev_dbg(ndev, "%s: attached to PHY (UID 0x%x) Link = %d\n",
__func__, phydev->phy_id, phydev->link);
- /* save phy device in private structure */
- priv->phydev = phydev;
-
return 0;
}
@@ -1173,8 +1173,8 @@ static int sxgbe_open(struct net_device *dev)
priv->hw->dma->start_tx(priv->ioaddr, SXGBE_TX_QUEUES);
priv->hw->dma->start_rx(priv->ioaddr, SXGBE_RX_QUEUES);
- if (priv->phydev)
- phy_start(priv->phydev);
+ if (dev->phydev)
+ phy_start(dev->phydev);
/* initialise TX coalesce parameters */
sxgbe_tx_init_coalesce(priv);
@@ -1194,8 +1194,8 @@ static int sxgbe_open(struct net_device *dev)
init_error:
free_dma_desc_resources(priv);
- if (priv->phydev)
- phy_disconnect(priv->phydev);
+ if (dev->phydev)
+ phy_disconnect(dev->phydev);
phy_error:
clk_disable_unprepare(priv->sxgbe_clk);
@@ -1216,10 +1216,9 @@ static int sxgbe_release(struct net_device *dev)
del_timer_sync(&priv->eee_ctrl_timer);
/* Stop and disconnect the PHY */
- if (priv->phydev) {
- phy_stop(priv->phydev);
- phy_disconnect(priv->phydev);
- priv->phydev = NULL;
+ if (dev->phydev) {
+ phy_stop(dev->phydev);
+ phy_disconnect(dev->phydev);
}
netif_tx_stop_all_queues(dev);
@@ -1969,7 +1968,6 @@ static void sxgbe_poll_controller(struct net_device *dev)
*/
static int sxgbe_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct sxgbe_priv_data *priv = netdev_priv(dev);
int ret = -EOPNOTSUPP;
if (!netif_running(dev))
@@ -1979,9 +1977,9 @@ static int sxgbe_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSMIIREG:
- if (!priv->phydev)
+ if (!dev->phydev)
return -EINVAL;
- ret = phy_mii_ioctl(priv->phydev, rq, cmd);
+ ret = phy_mii_ioctl(dev->phydev, rq, cmd);
break;
default:
break;
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index ca7336605748..c2bd5378ffda 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -572,7 +572,7 @@ static inline int sgiseeq_reset(struct net_device *dev)
if (err)
return err;
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
return 0;
@@ -648,7 +648,7 @@ static void timeout(struct net_device *dev)
printk(KERN_NOTICE "%s: transmit timed out, resetting\n", dev->name);
sgiseeq_reset(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
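netif_trans_update() replaces direct writes to dev->trans_start, which is being phased out in favour of per-queue timestamps. At this point in the tree the helper reduces to roughly the sketch below (queue 0 only); shown for reference rather than as the authoritative definition:

	static inline void netif_trans_update_sketch(struct net_device *dev)
	{
		struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

		/* avoid dirtying the cacheline when the stamp is already current */
		if (txq->trans_start != jiffies)
			txq->trans_start = jiffies;
	}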
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 1681084cc96f..e00a669e9e09 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -50,14 +50,34 @@ enum {
#define HUNT_FILTER_TBL_ROWS 8192
#define EFX_EF10_FILTER_ID_INVALID 0xffff
+
+#define EFX_EF10_FILTER_DEV_UC_MAX 32
+#define EFX_EF10_FILTER_DEV_MC_MAX 256
+
+/* VLAN list entry */
+struct efx_ef10_vlan {
+ struct list_head list;
+ u16 vid;
+};
+
+/* Per-VLAN filters information */
+struct efx_ef10_filter_vlan {
+ struct list_head list;
+ u16 vid;
+ u16 uc[EFX_EF10_FILTER_DEV_UC_MAX];
+ u16 mc[EFX_EF10_FILTER_DEV_MC_MAX];
+ u16 ucdef;
+ u16 bcast;
+ u16 mcdef;
+};
+
struct efx_ef10_dev_addr {
u8 addr[ETH_ALEN];
- u16 id;
};
struct efx_ef10_filter_table {
-/* The RX match field masks supported by this fw & hw, in order of priority */
- enum efx_filter_match_flags rx_match_flags[
+/* The MCDI match masks supported by this fw & hw, in order of priority */
+ u32 rx_match_mcdi_flags[
MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM];
unsigned int rx_match_count;
@@ -73,16 +93,16 @@ struct efx_ef10_filter_table {
} *entry;
wait_queue_head_t waitq;
/* Shadow of net_device address lists, guarded by mac_lock */
-#define EFX_EF10_FILTER_DEV_UC_MAX 32
-#define EFX_EF10_FILTER_DEV_MC_MAX 256
struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX];
struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
int dev_uc_count;
int dev_mc_count;
-/* Indices (like efx_ef10_dev_addr.id) for promisc/allmulti filters */
- u16 ucdef_id;
- u16 bcast_id;
- u16 mcdef_id;
+ bool uc_promisc;
+ bool mc_promisc;
+/* Whether in multicast promiscuous mode when filters were last synced */
+ bool mc_promisc_last;
+ bool vlan_filter;
+ struct list_head vlan_list;
};
/* An arbitrary search limit for the software hash table */
@@ -90,6 +110,10 @@ struct efx_ef10_filter_table {
static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
static void efx_ef10_filter_table_remove(struct efx_nic *efx);
+static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid);
+static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx,
+ struct efx_ef10_filter_vlan *vlan);
+static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid);
static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
{
@@ -275,6 +299,131 @@ static ssize_t efx_ef10_show_primary_flag(struct device *dev,
? 1 : 0);
}
+static struct efx_ef10_vlan *efx_ef10_find_vlan(struct efx_nic *efx, u16 vid)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_ef10_vlan *vlan;
+
+ WARN_ON(!mutex_is_locked(&nic_data->vlan_lock));
+
+ list_for_each_entry(vlan, &nic_data->vlan_list, list) {
+ if (vlan->vid == vid)
+ return vlan;
+ }
+
+ return NULL;
+}
+
+static int efx_ef10_add_vlan(struct efx_nic *efx, u16 vid)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_ef10_vlan *vlan;
+ int rc;
+
+ mutex_lock(&nic_data->vlan_lock);
+
+ vlan = efx_ef10_find_vlan(efx, vid);
+ if (vlan) {
+ /* We add VID 0 on init. 8021q adds it on module init
+ * for all interfaces with the VLAN filtering feature.
+ */
+ if (vid == 0)
+ goto done_unlock;
+ netif_warn(efx, drv, efx->net_dev,
+ "VLAN %u already added\n", vid);
+ rc = -EALREADY;
+ goto fail_exist;
+ }
+
+ rc = -ENOMEM;
+ vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+ if (!vlan)
+ goto fail_alloc;
+
+ vlan->vid = vid;
+
+ list_add_tail(&vlan->list, &nic_data->vlan_list);
+
+ if (efx->filter_state) {
+ mutex_lock(&efx->mac_lock);
+ down_write(&efx->filter_sem);
+ rc = efx_ef10_filter_add_vlan(efx, vlan->vid);
+ up_write(&efx->filter_sem);
+ mutex_unlock(&efx->mac_lock);
+ if (rc)
+ goto fail_filter_add_vlan;
+ }
+
+done_unlock:
+ mutex_unlock(&nic_data->vlan_lock);
+ return 0;
+
+fail_filter_add_vlan:
+ list_del(&vlan->list);
+ kfree(vlan);
+fail_alloc:
+fail_exist:
+ mutex_unlock(&nic_data->vlan_lock);
+ return rc;
+}
+
+static void efx_ef10_del_vlan_internal(struct efx_nic *efx,
+ struct efx_ef10_vlan *vlan)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ WARN_ON(!mutex_is_locked(&nic_data->vlan_lock));
+
+ if (efx->filter_state) {
+ down_write(&efx->filter_sem);
+ efx_ef10_filter_del_vlan(efx, vlan->vid);
+ up_write(&efx->filter_sem);
+ }
+
+ list_del(&vlan->list);
+ kfree(vlan);
+}
+
+static int efx_ef10_del_vlan(struct efx_nic *efx, u16 vid)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_ef10_vlan *vlan;
+ int rc = 0;
+
+ /* 8021q removes VID 0 on module unload for all interfaces
+ * with VLAN filtering feature. We need to keep it to receive
+ * untagged traffic.
+ */
+ if (vid == 0)
+ return 0;
+
+ mutex_lock(&nic_data->vlan_lock);
+
+ vlan = efx_ef10_find_vlan(efx, vid);
+ if (!vlan) {
+ netif_err(efx, drv, efx->net_dev,
+ "VLAN %u to be deleted not found\n", vid);
+ rc = -ENOENT;
+ } else {
+ efx_ef10_del_vlan_internal(efx, vlan);
+ }
+
+ mutex_unlock(&nic_data->vlan_lock);
+
+ return rc;
+}
+
+static void efx_ef10_cleanup_vlans(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct efx_ef10_vlan *vlan, *next_vlan;
+
+ mutex_lock(&nic_data->vlan_lock);
+ list_for_each_entry_safe(vlan, next_vlan, &nic_data->vlan_list, list)
+ efx_ef10_del_vlan_internal(efx, vlan);
+ mutex_unlock(&nic_data->vlan_lock);
+}
+
static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag,
NULL);
static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL);
@@ -421,8 +570,30 @@ static int efx_ef10_probe(struct efx_nic *efx)
#endif
ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr);
+ INIT_LIST_HEAD(&nic_data->vlan_list);
+ mutex_init(&nic_data->vlan_lock);
+
+ /* Add unspecified VID to support VLAN filtering being disabled */
+ rc = efx_ef10_add_vlan(efx, EFX_FILTER_VID_UNSPEC);
+ if (rc)
+ goto fail_add_vid_unspec;
+
+ /* If VLAN filtering is enabled, we need VID 0 to get untagged
+ * traffic. It is added automatically if the 8021q module is loaded,
+ * but we can't rely on that since the module may not be loaded.
+ */
+ rc = efx_ef10_add_vlan(efx, 0);
+ if (rc)
+ goto fail_add_vid_0;
+
return 0;
+fail_add_vid_0:
+ efx_ef10_cleanup_vlans(efx);
+fail_add_vid_unspec:
+ mutex_destroy(&nic_data->vlan_lock);
+ efx_ptp_remove(efx);
+ efx_mcdi_mon_remove(efx);
fail5:
device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
fail4:
@@ -619,6 +790,17 @@ fail:
return rc;
}
+static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
+{
+ struct efx_channel *channel;
+ struct efx_tx_queue *tx_queue;
+
+ /* All our existing PIO buffers went away */
+ efx_for_each_channel(channel, efx)
+ efx_for_each_channel_tx_queue(tx_queue, channel)
+ tx_queue->piobuf = NULL;
+}
+
#else /* !EFX_USE_PIO */
static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
@@ -635,6 +817,10 @@ static void efx_ef10_free_piobufs(struct efx_nic *efx)
{
}
+static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
+{
+}
+
#endif /* EFX_USE_PIO */
static void efx_ef10_remove(struct efx_nic *efx)
@@ -661,6 +847,9 @@ static void efx_ef10_remove(struct efx_nic *efx)
}
#endif
+ efx_ef10_cleanup_vlans(efx);
+ mutex_destroy(&nic_data->vlan_lock);
+
efx_ptp_remove(efx);
efx_mcdi_mon_remove(efx);
@@ -689,6 +878,45 @@ static int efx_ef10_probe_pf(struct efx_nic *efx)
return efx_ef10_probe(efx);
}
+int efx_ef10_vadaptor_query(struct efx_nic *efx, unsigned int port_id,
+ u32 *port_flags, u32 *vadaptor_flags,
+ unsigned int *vlan_tags)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_QUERY_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_VADAPTOR_QUERY_OUT_LEN);
+ size_t outlen;
+ int rc;
+
+ if (nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN)) {
+ MCDI_SET_DWORD(inbuf, VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID,
+ port_id);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_QUERY, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+
+ if (outlen < sizeof(outbuf)) {
+ rc = -EIO;
+ return rc;
+ }
+ }
+
+ if (port_flags)
+ *port_flags = MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_PORT_FLAGS);
+ if (vadaptor_flags)
+ *vadaptor_flags =
+ MCDI_DWORD(outbuf, VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS);
+ if (vlan_tags)
+ *vlan_tags =
+ MCDI_DWORD(outbuf,
+ VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS);
+
+ return 0;
+}
+
int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);
@@ -1018,6 +1246,7 @@ static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
nic_data->must_realloc_vis = true;
nic_data->must_restore_filters = true;
nic_data->must_restore_piobufs = true;
+ efx_ef10_forget_old_piobufs(efx);
nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
/* Driver-created vswitches and vports must be re-created */
@@ -1288,13 +1517,14 @@ static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
}
#if BITS_PER_LONG == 64
+ BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 2);
mask[0] = raw_mask[0];
mask[1] = raw_mask[1];
#else
+ BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 3);
mask[0] = raw_mask[0] & 0xffffffff;
mask[1] = raw_mask[0] >> 32;
mask[2] = raw_mask[1] & 0xffffffff;
- mask[3] = raw_mask[1] >> 32;
#endif
}
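The dropped mask[3] write was an out-of-bounds store on 32-bit builds: the callers size the mask with BITS_TO_LONGS(EF10_STAT_COUNT), and the new BUILD_BUG_ON()s pin the expected element counts at compile time. From those assertions the stat count must lie in 65..96; the arithmetic, with an illustrative value:

	/* BITS_TO_LONGS(nr) == DIV_ROUND_UP(nr, BITS_PER_LONG), e.g. for 81 stats:
	 *	(81 + 63) / 64 = 2 longs on 64-bit  -> mask[0..1]
	 *	(81 + 31) / 32 = 3 longs on 32-bit  -> mask[0..2]
	 * so mask[3] never names real storage on either build.
	 */
	BUILD_BUG_ON(BITS_TO_LONGS(81) != (BITS_PER_LONG == 64 ? 2 : 3));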
@@ -3024,15 +3254,55 @@ static int efx_ef10_filter_push(struct efx_nic *efx,
return rc;
}
-static int efx_ef10_filter_rx_match_pri(struct efx_ef10_filter_table *table,
- enum efx_filter_match_flags match_flags)
+static u32 efx_ef10_filter_mcdi_flags_from_spec(const struct efx_filter_spec *spec)
{
+ unsigned int match_flags = spec->match_flags;
+ u32 mcdi_flags = 0;
+
+ if (match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) {
+ match_flags &= ~EFX_FILTER_MATCH_LOC_MAC_IG;
+ mcdi_flags |=
+ is_multicast_ether_addr(spec->loc_mac) ?
+ (1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN) :
+ (1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN);
+ }
+
+#define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field) { \
+ unsigned int old_match_flags = match_flags; \
+ match_flags &= ~EFX_FILTER_MATCH_ ## gen_flag; \
+ if (match_flags != old_match_flags) \
+ mcdi_flags |= \
+ (1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
+ mcdi_field ## _LBN); \
+ }
+ MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP);
+ MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP);
+ MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC);
+ MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT);
+ MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC);
+ MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT);
+ MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE);
+ MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN);
+ MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN);
+ MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO);
+#undef MAP_FILTER_TO_MCDI_FLAG
+
+ /* Did we map them all? */
+ WARN_ON_ONCE(match_flags);
+
+ return mcdi_flags;
+}
+
+static int efx_ef10_filter_pri(struct efx_ef10_filter_table *table,
+ const struct efx_filter_spec *spec)
+{
+ u32 mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec);
unsigned int match_pri;
for (match_pri = 0;
match_pri < table->rx_match_count;
match_pri++)
- if (table->rx_match_flags[match_pri] == match_flags)
+ if (table->rx_match_mcdi_flags[match_pri] == mcdi_flags)
return match_pri;
return -EPROTONOSUPPORT;
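For readers unrolling MAP_FILTER_TO_MCDI_FLAG(): each invocation clears one generic match bit and sets the corresponding MCDI bit only if the clear removed something, so whatever remains in match_flags afterwards is an unmapped match type (hence the WARN_ON_ONCE above). Roughly, the REM_HOST line expands to:

	{
		unsigned int old_match_flags = match_flags;

		match_flags &= ~EFX_FILTER_MATCH_REM_HOST;
		if (match_flags != old_match_flags)
			mcdi_flags |= (1 << MC_CMD_FILTER_OP_IN_MATCH_SRC_IP_LBN);
	}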
@@ -3058,7 +3328,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
EFX_FILTER_FLAG_RX)
return -EINVAL;
- rc = efx_ef10_filter_rx_match_pri(table, spec->match_flags);
+ rc = efx_ef10_filter_pri(table, spec);
if (rc < 0)
return rc;
match_pri = rc;
@@ -3297,7 +3567,7 @@ static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
spec = efx_ef10_filter_entry_spec(table, filter_idx);
if (!spec ||
(!by_index &&
- efx_ef10_filter_rx_match_pri(table, spec->match_flags) !=
+ efx_ef10_filter_pri(table, spec) !=
filter_id / HUNT_FILTER_TBL_ROWS)) {
rc = -ENOENT;
goto out_unlock;
@@ -3378,12 +3648,13 @@ static u32 efx_ef10_filter_get_unsafe_id(struct efx_nic *efx, u32 filter_id)
return filter_id % HUNT_FILTER_TBL_ROWS;
}
-static int efx_ef10_filter_remove_unsafe(struct efx_nic *efx,
- enum efx_filter_priority priority,
- u32 filter_id)
+static void efx_ef10_filter_remove_unsafe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id)
{
- return efx_ef10_filter_remove_internal(efx, 1U << priority,
- filter_id, true);
+ if (filter_id == EFX_EF10_FILTER_ID_INVALID)
+ return;
+ efx_ef10_filter_remove_internal(efx, 1U << priority, filter_id, true);
}
static int efx_ef10_filter_get_safe(struct efx_nic *efx,
@@ -3398,7 +3669,7 @@ static int efx_ef10_filter_get_safe(struct efx_nic *efx,
spin_lock_bh(&efx->filter_lock);
saved_spec = efx_ef10_filter_entry_spec(table, filter_idx);
if (saved_spec && saved_spec->priority == priority &&
- efx_ef10_filter_rx_match_pri(table, saved_spec->match_flags) ==
+ efx_ef10_filter_pri(table, saved_spec) ==
filter_id / HUNT_FILTER_TBL_ROWS) {
*spec = *saved_spec;
rc = 0;
@@ -3471,8 +3742,7 @@ static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
count = -EMSGSIZE;
break;
}
- buf[count++] = (efx_ef10_filter_rx_match_pri(
- table, spec->match_flags) *
+ buf[count++] = (efx_ef10_filter_pri(table, spec) *
HUNT_FILTER_TBL_ROWS +
filter_idx);
}
@@ -3708,15 +3978,58 @@ static int efx_ef10_filter_match_flags_from_mcdi(u32 mcdi_flags)
return match_flags;
}
+static void efx_ef10_filter_cleanup_vlans(struct efx_nic *efx)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_ef10_filter_vlan *vlan, *next_vlan;
+
+ /* See comment in efx_ef10_filter_table_remove() */
+ if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+ return;
+
+ if (!table)
+ return;
+
+ list_for_each_entry_safe(vlan, next_vlan, &table->vlan_list, list)
+ efx_ef10_filter_del_vlan_internal(efx, vlan);
+}
+
+static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table,
+ enum efx_filter_match_flags match_flags)
+{
+ unsigned int match_pri;
+ int mf;
+
+ for (match_pri = 0;
+ match_pri < table->rx_match_count;
+ match_pri++) {
+ mf = efx_ef10_filter_match_flags_from_mcdi(
+ table->rx_match_mcdi_flags[match_pri]);
+ if (mf == match_flags)
+ return true;
+ }
+
+ return false;
+}
+
static int efx_ef10_filter_table_probe(struct efx_nic *efx)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct net_device *net_dev = efx->net_dev;
unsigned int pd_match_pri, pd_match_count;
struct efx_ef10_filter_table *table;
+ struct efx_ef10_vlan *vlan;
size_t outlen;
int rc;
+ if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+ return -EINVAL;
+
+ if (efx->filter_state) /* already probed */
+ return 0;
+
table = kzalloc(sizeof(*table), GFP_KERNEL);
if (!table)
return -ENOMEM;
@@ -3749,24 +4062,48 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx)
"%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n",
__func__, mcdi_flags, pd_match_pri,
rc, table->rx_match_count);
- table->rx_match_flags[table->rx_match_count++] = rc;
+ table->rx_match_mcdi_flags[table->rx_match_count] = mcdi_flags;
+ table->rx_match_count++;
}
}
+ if ((efx_supported_features(efx) & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+ !(efx_ef10_filter_match_supported(table,
+ (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC)) &&
+ efx_ef10_filter_match_supported(table,
+ (EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC_IG)))) {
+ netif_info(efx, probe, net_dev,
+ "VLAN filters are not supported in this firmware variant\n");
+ net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+ efx->fixed_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+ net_dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+ }
+
table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry));
if (!table->entry) {
rc = -ENOMEM;
goto fail;
}
- table->ucdef_id = EFX_EF10_FILTER_ID_INVALID;
- table->bcast_id = EFX_EF10_FILTER_ID_INVALID;
- table->mcdef_id = EFX_EF10_FILTER_ID_INVALID;
+ table->mc_promisc_last = false;
+ table->vlan_filter =
+ !!(efx->net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
+ INIT_LIST_HEAD(&table->vlan_list);
efx->filter_state = table;
init_waitqueue_head(&table->waitq);
+
+ list_for_each_entry(vlan, &nic_data->vlan_list, list) {
+ rc = efx_ef10_filter_add_vlan(efx, vlan->vid);
+ if (rc)
+ goto fail_add_vlan;
+ }
+
return 0;
+fail_add_vlan:
+ efx_ef10_filter_cleanup_vlans(efx);
+ efx->filter_state = NULL;
fail:
kfree(table);
return rc;
@@ -3827,7 +4164,6 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx)
nic_data->must_restore_filters = false;
}
-/* Caller must hold efx->filter_sem for write */
static void efx_ef10_filter_table_remove(struct efx_nic *efx)
{
struct efx_ef10_filter_table *table = efx->filter_state;
@@ -3836,7 +4172,17 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx)
unsigned int filter_idx;
int rc;
+ efx_ef10_filter_cleanup_vlans(efx);
efx->filter_state = NULL;
+ /* If we were called without locking, then it's not safe to free
+ * the table as others might be using it. So we just WARN, leak
+ * the memory, and potentially get an inconsistent filter table
+ * state.
+ * This should never actually happen.
+ */
+ if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+ return;
+
if (!table)
return;
@@ -3864,37 +4210,54 @@ static void efx_ef10_filter_table_remove(struct efx_nic *efx)
kfree(table);
}
-#define EFX_EF10_FILTER_DO_MARK_OLD(id) \
- if (id != EFX_EF10_FILTER_ID_INVALID) { \
- filter_idx = efx_ef10_filter_get_unsafe_id(efx, id); \
- if (!table->entry[filter_idx].spec) \
- netif_dbg(efx, drv, efx->net_dev, \
- "%s: marked null spec old %04x:%04x\n", \
- __func__, id, filter_idx); \
- table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;\
- }
-static void efx_ef10_filter_mark_old(struct efx_nic *efx)
+static void efx_ef10_filter_mark_one_old(struct efx_nic *efx, uint16_t *id)
{
struct efx_ef10_filter_table *table = efx->filter_state;
- unsigned int filter_idx, i;
+ unsigned int filter_idx;
- if (!table)
- return;
+ if (*id != EFX_EF10_FILTER_ID_INVALID) {
+ filter_idx = efx_ef10_filter_get_unsafe_id(efx, *id);
+ if (!table->entry[filter_idx].spec)
+ netif_dbg(efx, drv, efx->net_dev,
+ "marked null spec old %04x:%04x\n", *id,
+ filter_idx);
+ table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD;
+ *id = EFX_EF10_FILTER_ID_INVALID;
+ }
+}
+
+/* Mark old per-VLAN filters that may need to be removed */
+static void _efx_ef10_filter_vlan_mark_old(struct efx_nic *efx,
+ struct efx_ef10_filter_vlan *vlan)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ unsigned int i;
- /* Mark old filters that may need to be removed */
- spin_lock_bh(&efx->filter_lock);
for (i = 0; i < table->dev_uc_count; i++)
- EFX_EF10_FILTER_DO_MARK_OLD(table->dev_uc_list[i].id);
+ efx_ef10_filter_mark_one_old(efx, &vlan->uc[i]);
for (i = 0; i < table->dev_mc_count; i++)
- EFX_EF10_FILTER_DO_MARK_OLD(table->dev_mc_list[i].id);
- EFX_EF10_FILTER_DO_MARK_OLD(table->ucdef_id);
- EFX_EF10_FILTER_DO_MARK_OLD(table->bcast_id);
- EFX_EF10_FILTER_DO_MARK_OLD(table->mcdef_id);
+ efx_ef10_filter_mark_one_old(efx, &vlan->mc[i]);
+ efx_ef10_filter_mark_one_old(efx, &vlan->ucdef);
+ efx_ef10_filter_mark_one_old(efx, &vlan->bcast);
+ efx_ef10_filter_mark_one_old(efx, &vlan->mcdef);
+}
+
+/* Mark old filters that may need to be removed.
+ * Caller must hold efx->filter_sem for read if race against
+ * efx_ef10_filter_table_remove() is possible
+ */
+static void efx_ef10_filter_mark_old(struct efx_nic *efx)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_ef10_filter_vlan *vlan;
+
+ spin_lock_bh(&efx->filter_lock);
+ list_for_each_entry(vlan, &table->vlan_list, list)
+ _efx_ef10_filter_vlan_mark_old(efx, vlan);
spin_unlock_bh(&efx->filter_lock);
}
-#undef EFX_EF10_FILTER_DO_MARK_OLD
-static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx, bool *promisc)
+static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx)
{
struct efx_ef10_filter_table *table = efx->filter_state;
struct net_device *net_dev = efx->net_dev;
@@ -3902,45 +4265,38 @@ static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx, bool *promisc)
int addr_count;
unsigned int i;
- table->ucdef_id = EFX_EF10_FILTER_ID_INVALID;
addr_count = netdev_uc_count(net_dev);
- if (net_dev->flags & IFF_PROMISC)
- *promisc = true;
+ table->uc_promisc = !!(net_dev->flags & IFF_PROMISC);
table->dev_uc_count = 1 + addr_count;
ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
i = 1;
netdev_for_each_uc_addr(uc, net_dev) {
if (i >= EFX_EF10_FILTER_DEV_UC_MAX) {
- *promisc = true;
+ table->uc_promisc = true;
break;
}
ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
- table->dev_uc_list[i].id = EFX_EF10_FILTER_ID_INVALID;
i++;
}
}
-static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx, bool *promisc)
+static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx)
{
struct efx_ef10_filter_table *table = efx->filter_state;
struct net_device *net_dev = efx->net_dev;
struct netdev_hw_addr *mc;
unsigned int i, addr_count;
- table->mcdef_id = EFX_EF10_FILTER_ID_INVALID;
- table->bcast_id = EFX_EF10_FILTER_ID_INVALID;
- if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI))
- *promisc = true;
+ table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI));
addr_count = netdev_mc_count(net_dev);
i = 0;
netdev_for_each_mc_addr(mc, net_dev) {
if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
- *promisc = true;
+ table->mc_promisc = true;
break;
}
ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
- table->dev_mc_list[i].id = EFX_EF10_FILTER_ID_INVALID;
i++;
}
@@ -3948,7 +4304,8 @@ static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx, bool *promisc)
}
static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
- bool multicast, bool rollback)
+ struct efx_ef10_filter_vlan *vlan,
+ bool multicast, bool rollback)
{
struct efx_ef10_filter_table *table = efx->filter_state;
struct efx_ef10_dev_addr *addr_list;
@@ -3957,14 +4314,17 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
u8 baddr[ETH_ALEN];
unsigned int i, j;
int addr_count;
+ u16 *ids;
int rc;
if (multicast) {
addr_list = table->dev_mc_list;
addr_count = table->dev_mc_count;
+ ids = vlan->mc;
} else {
addr_list = table->dev_uc_list;
addr_count = table->dev_uc_count;
+ ids = vlan->uc;
}
filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0;
@@ -3972,8 +4332,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
/* Insert/renew filters */
for (i = 0; i < addr_count; i++) {
efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
- efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
- addr_list[i].addr);
+ efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr);
rc = efx_ef10_filter_insert(efx, &spec, true);
if (rc < 0) {
if (rollback) {
@@ -3982,12 +4341,10 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
rc);
/* Fall back to promiscuous */
for (j = 0; j < i; j++) {
- if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID)
- continue;
efx_ef10_filter_remove_unsafe(
efx, EFX_FILTER_PRI_AUTO,
- addr_list[j].id);
- addr_list[j].id = EFX_EF10_FILTER_ID_INVALID;
+ ids[j]);
+ ids[j] = EFX_EF10_FILTER_ID_INVALID;
}
return rc;
} else {
@@ -3995,40 +4352,40 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
rc = EFX_EF10_FILTER_ID_INVALID;
}
}
- addr_list[i].id = efx_ef10_filter_get_unsafe_id(efx, rc);
+ ids[i] = efx_ef10_filter_get_unsafe_id(efx, rc);
}
if (multicast && rollback) {
/* Also need an Ethernet broadcast filter */
efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
eth_broadcast_addr(baddr);
- efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, baddr);
+ efx_filter_set_eth_local(&spec, vlan->vid, baddr);
rc = efx_ef10_filter_insert(efx, &spec, true);
if (rc < 0) {
netif_warn(efx, drv, efx->net_dev,
"Broadcast filter insert failed rc=%d\n", rc);
/* Fall back to promiscuous */
for (j = 0; j < i; j++) {
- if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID)
- continue;
efx_ef10_filter_remove_unsafe(
efx, EFX_FILTER_PRI_AUTO,
- addr_list[j].id);
- addr_list[j].id = EFX_EF10_FILTER_ID_INVALID;
+ ids[j]);
+ ids[j] = EFX_EF10_FILTER_ID_INVALID;
}
return rc;
} else {
- table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc);
+ EFX_WARN_ON_PARANOID(vlan->bcast !=
+ EFX_EF10_FILTER_ID_INVALID);
+ vlan->bcast = efx_ef10_filter_get_unsafe_id(efx, rc);
}
}
return 0;
}
-static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast,
- bool rollback)
+static int efx_ef10_filter_insert_def(struct efx_nic *efx,
+ struct efx_ef10_filter_vlan *vlan,
+ bool multicast, bool rollback)
{
- struct efx_ef10_filter_table *table = efx->filter_state;
struct efx_ef10_nic_data *nic_data = efx->nic_data;
enum efx_filter_flags filter_flags;
struct efx_filter_spec spec;
@@ -4044,6 +4401,9 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast,
else
efx_filter_set_uc_def(&spec);
+ if (vlan->vid != EFX_FILTER_VID_UNSPEC)
+ efx_filter_set_eth_local(&spec, vlan->vid, NULL);
+
rc = efx_ef10_filter_insert(efx, &spec, true);
if (rc < 0) {
netif_printk(efx, drv, rc == -EPERM ? KERN_DEBUG : KERN_WARNING,
@@ -4051,14 +4411,14 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast,
"%scast mismatch filter insert failed rc=%d\n",
multicast ? "Multi" : "Uni", rc);
} else if (multicast) {
- table->mcdef_id = efx_ef10_filter_get_unsafe_id(efx, rc);
+ EFX_WARN_ON_PARANOID(vlan->mcdef != EFX_EF10_FILTER_ID_INVALID);
+ vlan->mcdef = efx_ef10_filter_get_unsafe_id(efx, rc);
if (!nic_data->workaround_26807) {
/* Also need an Ethernet broadcast filter */
efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
filter_flags, 0);
eth_broadcast_addr(baddr);
- efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
- baddr);
+ efx_filter_set_eth_local(&spec, vlan->vid, baddr);
rc = efx_ef10_filter_insert(efx, &spec, true);
if (rc < 0) {
netif_warn(efx, drv, efx->net_dev,
@@ -4068,17 +4428,20 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast,
/* Roll back the mc_def filter */
efx_ef10_filter_remove_unsafe(
efx, EFX_FILTER_PRI_AUTO,
- table->mcdef_id);
- table->mcdef_id = EFX_EF10_FILTER_ID_INVALID;
+ vlan->mcdef);
+ vlan->mcdef = EFX_EF10_FILTER_ID_INVALID;
return rc;
}
} else {
- table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc);
+ EFX_WARN_ON_PARANOID(vlan->bcast !=
+ EFX_EF10_FILTER_ID_INVALID);
+ vlan->bcast = efx_ef10_filter_get_unsafe_id(efx, rc);
}
}
rc = 0;
} else {
- table->ucdef_id = rc;
+ EFX_WARN_ON_PARANOID(vlan->ucdef != EFX_EF10_FILTER_ID_INVALID);
+ vlan->ucdef = rc;
rc = 0;
}
return rc;
@@ -4187,64 +4550,55 @@ reset_nic:
/* Caller must hold efx->filter_sem for read if race against
* efx_ef10_filter_table_remove() is possible
*/
-static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
+static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx,
+ struct efx_ef10_filter_vlan *vlan)
{
struct efx_ef10_filter_table *table = efx->filter_state;
struct efx_ef10_nic_data *nic_data = efx->nic_data;
- struct net_device *net_dev = efx->net_dev;
- bool uc_promisc = false, mc_promisc = false;
-
- if (!efx_dev_registered(efx))
- return;
-
- if (!table)
- return;
-
- efx_ef10_filter_mark_old(efx);
- /* Copy/convert the address lists; add the primary station
- * address and broadcast address
+ /* Do not install the unspecified VID if VLAN filtering is enabled.
+ * Do not install any specified VIDs if VLAN filtering is disabled.
+ */
- netif_addr_lock_bh(net_dev);
- efx_ef10_filter_uc_addr_list(efx, &uc_promisc);
- efx_ef10_filter_mc_addr_list(efx, &mc_promisc);
- netif_addr_unlock_bh(net_dev);
+ if ((vlan->vid == EFX_FILTER_VID_UNSPEC) == table->vlan_filter)
+ return;
/* Insert/renew unicast filters */
- if (uc_promisc) {
- efx_ef10_filter_insert_def(efx, false, false);
- efx_ef10_filter_insert_addr_list(efx, false, false);
+ if (table->uc_promisc) {
+ efx_ef10_filter_insert_def(efx, vlan, false, false);
+ efx_ef10_filter_insert_addr_list(efx, vlan, false, false);
} else {
/* If any of the filters failed to insert, fall back to
* promiscuous mode - add in the uc_def filter. But keep
* our individual unicast filters.
*/
- if (efx_ef10_filter_insert_addr_list(efx, false, false))
- efx_ef10_filter_insert_def(efx, false, false);
+ if (efx_ef10_filter_insert_addr_list(efx, vlan, false, false))
+ efx_ef10_filter_insert_def(efx, vlan, false, false);
}
/* Insert/renew multicast filters */
/* If changing promiscuous state with cascaded multicast filters, remove
* old filters first, so that packets are dropped rather than duplicated
*/
- if (nic_data->workaround_26807 && efx->mc_promisc != mc_promisc)
+ if (nic_data->workaround_26807 &&
+ table->mc_promisc_last != table->mc_promisc)
efx_ef10_filter_remove_old(efx);
- if (mc_promisc) {
+ if (table->mc_promisc) {
if (nic_data->workaround_26807) {
/* If we failed to insert promiscuous filters, rollback
* and fall back to individual multicast filters
*/
- if (efx_ef10_filter_insert_def(efx, true, true)) {
+ if (efx_ef10_filter_insert_def(efx, vlan, true, true)) {
/* Changing promisc state, so remove old filters */
efx_ef10_filter_remove_old(efx);
- efx_ef10_filter_insert_addr_list(efx, true, false);
+ efx_ef10_filter_insert_addr_list(efx, vlan,
+ true, false);
}
} else {
/* If we failed to insert promiscuous filters, don't
* rollback. Regardless, also insert the mc_list
*/
- efx_ef10_filter_insert_def(efx, true, false);
- efx_ef10_filter_insert_addr_list(efx, true, false);
+ efx_ef10_filter_insert_def(efx, vlan, true, false);
+ efx_ef10_filter_insert_addr_list(efx, vlan, true, false);
}
} else {
/* If any filters failed to insert, rollback and fall back to
@@ -4252,17 +4606,153 @@ static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
* that fails, roll back again and insert as many of our
* individual multicast filters as we can.
*/
- if (efx_ef10_filter_insert_addr_list(efx, true, true)) {
+ if (efx_ef10_filter_insert_addr_list(efx, vlan, true, true)) {
/* Changing promisc state, so remove old filters */
if (nic_data->workaround_26807)
efx_ef10_filter_remove_old(efx);
- if (efx_ef10_filter_insert_def(efx, true, true))
- efx_ef10_filter_insert_addr_list(efx, true, false);
+ if (efx_ef10_filter_insert_def(efx, vlan, true, true))
+ efx_ef10_filter_insert_addr_list(efx, vlan,
+ true, false);
}
}
+}
+
+/* Caller must hold efx->filter_sem for read if race against
+ * efx_ef10_filter_table_remove() is possible
+ */
+static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct net_device *net_dev = efx->net_dev;
+ struct efx_ef10_filter_vlan *vlan;
+ bool vlan_filter;
+
+ if (!efx_dev_registered(efx))
+ return;
+
+ if (!table)
+ return;
+
+ efx_ef10_filter_mark_old(efx);
+
+ /* Copy/convert the address lists; add the primary station
+ * address and broadcast address
+ */
+ netif_addr_lock_bh(net_dev);
+ efx_ef10_filter_uc_addr_list(efx);
+ efx_ef10_filter_mc_addr_list(efx);
+ netif_addr_unlock_bh(net_dev);
+
+ /* If VLAN filtering changes, all old filters are finally removed.
+ * Do it in advance to avoid conflicts for unicast untagged and
+ * VLAN 0 tagged filters.
+ */
+ vlan_filter = !!(net_dev->features & NETIF_F_HW_VLAN_CTAG_FILTER);
+ if (table->vlan_filter != vlan_filter) {
+ table->vlan_filter = vlan_filter;
+ efx_ef10_filter_remove_old(efx);
+ }
+
+ list_for_each_entry(vlan, &table->vlan_list, list)
+ efx_ef10_filter_vlan_sync_rx_mode(efx, vlan);
efx_ef10_filter_remove_old(efx);
- efx->mc_promisc = mc_promisc;
+ table->mc_promisc_last = table->mc_promisc;
+}
+
+static struct efx_ef10_filter_vlan *efx_ef10_filter_find_vlan(struct efx_nic *efx, u16 vid)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_ef10_filter_vlan *vlan;
+
+ WARN_ON(!rwsem_is_locked(&efx->filter_sem));
+
+ list_for_each_entry(vlan, &table->vlan_list, list) {
+ if (vlan->vid == vid)
+ return vlan;
+ }
+
+ return NULL;
+}
+
+static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid)
+{
+ struct efx_ef10_filter_table *table = efx->filter_state;
+ struct efx_ef10_filter_vlan *vlan;
+ unsigned int i;
+
+ if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+ return -EINVAL;
+
+ vlan = efx_ef10_filter_find_vlan(efx, vid);
+ if (WARN_ON(vlan)) {
+ netif_err(efx, drv, efx->net_dev,
+ "VLAN %u already added\n", vid);
+ return -EALREADY;
+ }
+
+ vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+ if (!vlan)
+ return -ENOMEM;
+
+ vlan->vid = vid;
+
+ for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
+ vlan->uc[i] = EFX_EF10_FILTER_ID_INVALID;
+ for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
+ vlan->mc[i] = EFX_EF10_FILTER_ID_INVALID;
+ vlan->ucdef = EFX_EF10_FILTER_ID_INVALID;
+ vlan->bcast = EFX_EF10_FILTER_ID_INVALID;
+ vlan->mcdef = EFX_EF10_FILTER_ID_INVALID;
+
+ list_add_tail(&vlan->list, &table->vlan_list);
+
+ if (efx_dev_registered(efx))
+ efx_ef10_filter_vlan_sync_rx_mode(efx, vlan);
+
+ return 0;
+}
+
+static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx,
+ struct efx_ef10_filter_vlan *vlan)
+{
+ unsigned int i;
+
+ /* See comment in efx_ef10_filter_table_remove() */
+ if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+ return;
+
+ list_del(&vlan->list);
+
+ for (i = 0; i < ARRAY_SIZE(vlan->uc); i++)
+ efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
+ vlan->uc[i]);
+ for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
+ efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
+ vlan->mc[i]);
+ efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->ucdef);
+ efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->bcast);
+ efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->mcdef);
+
+ kfree(vlan);
+}
+
+static void efx_ef10_filter_del_vlan(struct efx_nic *efx, u16 vid)
+{
+ struct efx_ef10_filter_vlan *vlan;
+
+ /* See comment in efx_ef10_filter_table_remove() */
+ if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+ return;
+
+ vlan = efx_ef10_filter_find_vlan(efx, vid);
+ if (!vlan) {
+ netif_err(efx, drv, efx->net_dev,
+ "VLAN %u not found in filter state\n", vid);
+ return;
+ }
+
+ efx_ef10_filter_del_vlan_internal(efx, vlan);
}
static int efx_ef10_set_mac_address(struct efx_nic *efx)
@@ -4274,6 +4764,8 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
efx_device_detach_sync(efx);
efx_net_stop(efx->net_dev);
+
+ mutex_lock(&efx->mac_lock);
down_write(&efx->filter_sem);
efx_ef10_filter_table_remove(efx);
@@ -4286,6 +4778,8 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
efx_ef10_filter_table_probe(efx);
up_write(&efx->filter_sem);
+ mutex_unlock(&efx->mac_lock);
+
if (was_enabled)
efx_net_open(efx->net_dev);
netif_device_attach(efx->net_dev);
@@ -4687,6 +5181,29 @@ static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
}
}
+static int efx_ef10_vlan_rx_add_vid(struct efx_nic *efx, __be16 proto, u16 vid)
+{
+ if (proto != htons(ETH_P_8021Q))
+ return -EINVAL;
+
+ return efx_ef10_add_vlan(efx, vid);
+}
+
+static int efx_ef10_vlan_rx_kill_vid(struct efx_nic *efx, __be16 proto, u16 vid)
+{
+ if (proto != htons(ETH_P_8021Q))
+ return -EINVAL;
+
+ return efx_ef10_del_vlan(efx, vid);
+}
+
+#define EF10_OFFLOAD_FEATURES \
+ (NETIF_F_IP_CSUM | \
+ NETIF_F_HW_VLAN_CTAG_FILTER | \
+ NETIF_F_IPV6_CSUM | \
+ NETIF_F_RXHASH | \
+ NETIF_F_NTUPLE)
+
const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.is_vf = true,
.mem_bar = EFX_MEM_VF_BAR,
@@ -4764,6 +5281,8 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
#endif
.ptp_write_host_time = efx_ef10_ptp_write_host_time_vf,
.ptp_set_ts_config = efx_ef10_ptp_set_ts_config_vf,
+ .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid,
+ .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid,
#ifdef CONFIG_SFC_SRIOV
.vswitching_probe = efx_ef10_vswitching_probe_vf,
.vswitching_restore = efx_ef10_vswitching_restore_vf,
@@ -4782,8 +5301,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.always_rx_scatter = true,
.max_interrupt_mode = EFX_INT_MODE_MSIX,
.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
- .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_RXHASH | NETIF_F_NTUPLE),
+ .offload_features = EF10_OFFLOAD_FEATURES,
.mcdi_max_ver = 2,
.max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
.hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
@@ -4875,6 +5393,8 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.ptp_write_host_time = efx_ef10_ptp_write_host_time,
.ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
.ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
+ .vlan_rx_add_vid = efx_ef10_vlan_rx_add_vid,
+ .vlan_rx_kill_vid = efx_ef10_vlan_rx_kill_vid,
#ifdef CONFIG_SFC_SRIOV
.sriov_configure = efx_ef10_sriov_configure,
.sriov_init = efx_ef10_sriov_init,
@@ -4903,8 +5423,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.always_rx_scatter = true,
.max_interrupt_mode = EFX_INT_MODE_MSIX,
.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
- .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_RXHASH | NETIF_F_NTUPLE),
+ .offload_features = EF10_OFFLOAD_FEATURES,
.mcdi_max_ver = 2,
.max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
.hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index 3c17f274e802..a949b9d27329 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -232,6 +232,35 @@ fail:
return rc;
}
+static int efx_ef10_vadaptor_alloc_set_features(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ u32 port_flags;
+ int rc;
+
+ rc = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+ if (rc)
+ goto fail_vadaptor_alloc;
+
+ rc = efx_ef10_vadaptor_query(efx, nic_data->vport_id,
+ &port_flags, NULL, NULL);
+ if (rc)
+ goto fail_vadaptor_query;
+
+ if (port_flags &
+ (1 << MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_LBN))
+ efx->fixed_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ else
+ efx->fixed_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ return 0;
+
+fail_vadaptor_query:
+ efx_ef10_vadaptor_free(efx, EVB_PORT_ID_ASSIGNED);
+fail_vadaptor_alloc:
+ return rc;
+}
+
/* On top of the default firmware vswitch setup, create a VEB vswitch and
* expansion vport for use by this function.
*/
@@ -243,7 +272,7 @@ int efx_ef10_vswitching_probe_pf(struct efx_nic *efx)
if (pci_sriov_get_totalvfs(efx->pci_dev) <= 0) {
/* vswitch not needed as we have no VFs */
- efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+ efx_ef10_vadaptor_alloc_set_features(efx);
return 0;
}
@@ -263,7 +292,7 @@ int efx_ef10_vswitching_probe_pf(struct efx_nic *efx)
goto fail3;
ether_addr_copy(nic_data->vport_mac, net_dev->dev_addr);
- rc = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+ rc = efx_ef10_vadaptor_alloc_set_features(efx);
if (rc)
goto fail4;
@@ -282,9 +311,7 @@ fail1:
int efx_ef10_vswitching_probe_vf(struct efx_nic *efx)
{
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
-
- return efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
+ return efx_ef10_vadaptor_alloc_set_features(efx);
}
int efx_ef10_vswitching_restore_pf(struct efx_nic *efx)
@@ -554,6 +581,7 @@ int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i, u16 vlan,
efx_device_detach_sync(vf->efx);
efx_net_stop(vf->efx->net_dev);
+ mutex_lock(&vf->efx->mac_lock);
down_write(&vf->efx->filter_sem);
vf->efx->type->filter_table_remove(vf->efx);
@@ -630,6 +658,7 @@ restore_filters:
goto reset_nic_up_write;
up_write(&vf->efx->filter_sem);
+ mutex_unlock(&vf->efx->mac_lock);
up_write(&vf->efx->filter_sem);
@@ -642,9 +671,10 @@ restore_filters:
return rc;
reset_nic_up_write:
- if (vf->efx)
+ if (vf->efx) {
up_write(&vf->efx->filter_sem);
-
+ mutex_unlock(&vf->efx->mac_lock);
+ }
reset_nic:
if (vf->efx) {
netif_err(efx, drv, efx->net_dev,
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.h b/drivers/net/ethernet/sfc/ef10_sriov.h
index 6d25b92cb45e..9ceb7ef0a210 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.h
+++ b/drivers/net/ethernet/sfc/ef10_sriov.h
@@ -70,6 +70,9 @@ int efx_ef10_vport_add_mac(struct efx_nic *efx,
int efx_ef10_vport_del_mac(struct efx_nic *efx,
unsigned int port_id, u8 *mac);
int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id);
+int efx_ef10_vadaptor_query(struct efx_nic *efx, unsigned int port_id,
+ u32 *port_flags, u32 *vadaptor_flags,
+ unsigned int *vlan_tags);
int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id);
#endif /* EF10_SRIOV_H */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 0705ec869487..14b821b1c880 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -600,6 +600,7 @@ fail:
*/
static void efx_start_datapath(struct efx_nic *efx)
{
+ netdev_features_t old_features = efx->net_dev->features;
bool old_rx_scatter = efx->rx_scatter;
struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue;
@@ -644,6 +645,15 @@ static void efx_start_datapath(struct efx_nic *efx)
efx->rx_dma_len, efx->rx_page_buf_step,
efx->rx_bufs_per_page, efx->rx_pages_per_batch);
+ /* Restore previously fixed features in hw_features and remove
+ * features which are fixed now
+ */
+ efx->net_dev->hw_features |= efx->net_dev->features;
+ efx->net_dev->hw_features &= ~efx->fixed_features;
+ efx->net_dev->features |= efx->fixed_features;
+ if (efx->net_dev->features != old_features)
+ netdev_features_change(efx->net_dev);
+
/* RX filters may also have scatter-enabled flags */
if (efx->rx_scatter != old_rx_scatter)
efx->type->filter_update_rx_scatter(efx);
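The three feature updates above implement a simple invariant: fixed features are always reported as enabled and are never offered as user-toggleable. A worked example with illustrative values:

	netdev_features_t fixed    = NETIF_F_HW_VLAN_CTAG_FILTER;
	netdev_features_t features = NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM;
	netdev_features_t hw_features;

	hw_features  = features;	/* restore previously advertised bits */
	hw_features &= ~fixed;		/* fixed bits may not be toggled... */
	features    |= fixed;		/* ...but must always read as on */
	/* result: hw_features == NETIF_F_RXCSUM, features unchanged */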
@@ -1719,6 +1729,7 @@ static int efx_probe_filters(struct efx_nic *efx)
spin_lock_init(&efx->filter_lock);
init_rwsem(&efx->filter_sem);
+ mutex_lock(&efx->mac_lock);
down_write(&efx->filter_sem);
rc = efx->type->filter_table_probe(efx);
if (rc)
@@ -1726,25 +1737,48 @@ static int efx_probe_filters(struct efx_nic *efx)
#ifdef CONFIG_RFS_ACCEL
if (efx->type->offload_features & NETIF_F_NTUPLE) {
- efx->rps_flow_id = kcalloc(efx->type->max_rx_ip_filters,
- sizeof(*efx->rps_flow_id),
- GFP_KERNEL);
- if (!efx->rps_flow_id) {
+ struct efx_channel *channel;
+ int i, success = 1;
+
+ efx_for_each_channel(channel, efx) {
+ channel->rps_flow_id =
+ kcalloc(efx->type->max_rx_ip_filters,
+ sizeof(*channel->rps_flow_id),
+ GFP_KERNEL);
+ if (!channel->rps_flow_id)
+ success = 0;
+ else
+ for (i = 0;
+ i < efx->type->max_rx_ip_filters;
+ ++i)
+ channel->rps_flow_id[i] =
+ RPS_FLOW_ID_INVALID;
+ }
+
+ if (!success) {
+ efx_for_each_channel(channel, efx)
+ kfree(channel->rps_flow_id);
efx->type->filter_table_remove(efx);
rc = -ENOMEM;
goto out_unlock;
}
+
+ efx->rps_expire_index = efx->rps_expire_channel = 0;
}
#endif
out_unlock:
up_write(&efx->filter_sem);
+ mutex_unlock(&efx->mac_lock);
return rc;
}
static void efx_remove_filters(struct efx_nic *efx)
{
#ifdef CONFIG_RFS_ACCEL
- kfree(efx->rps_flow_id);
+ struct efx_channel *channel;
+
+ efx_for_each_channel(channel, efx)
+ kfree(channel->rps_flow_id);
#endif
down_write(&efx->filter_sem);
efx->type->filter_table_remove(efx);
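Moving rps_flow_id from one per-function table to one per channel matters because the flow IDs that ARFS hands to the driver are only meaningful per receive queue; with a shared table, flows on different channels could collide on the same ID and confuse expiry checks. A sketch of the expiry-side lookup this layout enables, with an illustrative helper name:

	static bool efx_flow_may_expire(struct efx_channel *channel,
					unsigned int index)
	{
		u32 flow_id = channel->rps_flow_id[index];

		if (flow_id == RPS_FLOW_ID_INVALID)
			return false;	/* slot not in use */
		return rps_may_expire_flow(channel->efx->net_dev,
					   channel->rx_queue.core_index,
					   flow_id, index);
	}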
@@ -2290,14 +2324,46 @@ static void efx_set_rx_mode(struct net_device *net_dev)
static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
{
struct efx_nic *efx = netdev_priv(net_dev);
+ int rc;
/* If disabling RX n-tuple filtering, clear existing filters */
- if (net_dev->features & ~data & NETIF_F_NTUPLE)
- return efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
+ if (net_dev->features & ~data & NETIF_F_NTUPLE) {
+ rc = efx->type->filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
+ if (rc)
+ return rc;
+ }
+
+ /* If Rx VLAN filter is changed, update filters via mac_reconfigure */
+ if ((net_dev->features ^ data) & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ /* efx_set_rx_mode() will schedule MAC work to update filters
+ * when the new features are finally set in net_dev.
+ */
+ efx_set_rx_mode(net_dev);
+ }
return 0;
}
+static int efx_vlan_rx_add_vid(struct net_device *net_dev, __be16 proto, u16 vid)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->vlan_rx_add_vid)
+ return efx->type->vlan_rx_add_vid(efx, proto, vid);
+ else
+ return -EOPNOTSUPP;
+}
+
+static int efx_vlan_rx_kill_vid(struct net_device *net_dev, __be16 proto, u16 vid)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->vlan_rx_kill_vid)
+ return efx->type->vlan_rx_kill_vid(efx, proto, vid);
+ else
+ return -EOPNOTSUPP;
+}
+
static const struct net_device_ops efx_netdev_ops = {
.ndo_open = efx_net_open,
.ndo_stop = efx_net_stop,
@@ -2310,6 +2376,8 @@ static const struct net_device_ops efx_netdev_ops = {
.ndo_set_mac_address = efx_set_mac_address,
.ndo_set_rx_mode = efx_set_rx_mode,
.ndo_set_features = efx_set_features,
+ .ndo_vlan_rx_add_vid = efx_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = efx_vlan_rx_kill_vid,
#ifdef CONFIG_SFC_SRIOV
.ndo_set_vf_mac = efx_sriov_set_vf_mac,
.ndo_set_vf_vlan = efx_sriov_set_vf_vlan,
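These two ndo hooks are what the 8021q core (and anything else going through vlan_vid_add()/vlan_vid_del()) lands on; a NIC type without VLAN filter support simply reports -EOPNOTSUPP. A minimal sketch of the calling side, assuming the CTAG protocol and a feature gate like the one the VLAN core applies:

	static int example_add_vid(struct net_device *dev, u16 vid)
	{
		const struct net_device_ops *ops = dev->netdev_ops;

		if (!(dev->features & NETIF_F_HW_VLAN_CTAG_FILTER) ||
		    !ops->ndo_vlan_rx_add_vid)
			return 0;	/* no hardware filter to program */
		return ops->ndo_vlan_rx_add_vid(dev, htons(ETH_P_8021Q), vid);
	}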
@@ -3125,17 +3193,25 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
return -ENOMEM;
efx = netdev_priv(net_dev);
efx->type = (const struct efx_nic_type *) entry->driver_data;
+ efx->fixed_features |= NETIF_F_HIGHDMA;
net_dev->features |= (efx->type->offload_features | NETIF_F_SG |
- NETIF_F_HIGHDMA | NETIF_F_TSO |
- NETIF_F_RXCSUM);
+ NETIF_F_TSO | NETIF_F_RXCSUM);
if (efx->type->offload_features & (NETIF_F_IPV6_CSUM | NETIF_F_HW_CSUM))
net_dev->features |= NETIF_F_TSO6;
/* Mask for features that also apply to VLAN devices */
net_dev->vlan_features |= (NETIF_F_HW_CSUM | NETIF_F_SG |
NETIF_F_HIGHDMA | NETIF_F_ALL_TSO |
NETIF_F_RXCSUM);
- /* All offloads can be toggled */
- net_dev->hw_features = net_dev->features & ~NETIF_F_HIGHDMA;
+
+ net_dev->hw_features = net_dev->features & ~efx->fixed_features;
+
+ /* Disable VLAN filtering by default. It may be enforced if
+ * the feature is fixed (i.e. VLAN filters are required to
+ * receive VLAN tagged packets due to vPort restrictions).
+ */
+ net_dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
+ net_dev->features |= efx->fixed_features;
+
pci_set_drvdata(pci_dev, efx);
SET_NETDEV_DEV(net_dev, &pci_dev->dev);
rc = efx_init_struct(efx, pci_dev, net_dev);
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 5e3f93f04e62..c3ae739e9c7a 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -274,4 +274,13 @@ static inline void efx_device_detach_sync(struct efx_nic *efx)
netif_tx_unlock_bh(dev);
}
+static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem)
+{
+ if (WARN_ON(down_read_trylock(sem))) {
+ up_read(sem);
+ return false;
+ }
+ return true;
+}
+
#endif /* EFX_EFX_H */
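
A minimal usage sketch for the new assertion helper (example_filter_remove() is hypothetical; filter_sem and efx->type->filter_table_remove are the existing members seen above):

static int example_filter_remove(struct efx_nic *efx)
{
	/* WARNs and returns false if filter_sem is not held for write */
	if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
		return -EINVAL;

	efx->type->filter_table_remove(efx);
	return 0;
}
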
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 133e9e35be9e..4c83739d158f 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -104,7 +104,8 @@ int efx_farch_test_registers(struct efx_nic *efx,
const struct efx_farch_register_test *regs,
size_t n_regs)
{
- unsigned address = 0, i, j;
+ unsigned address = 0;
+ int i, j;
efx_oword_t mask, imask, original, reg, buf;
for (i = 0; i < n_regs; ++i) {
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index 4cc772164a79..c9a5b003caaf 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -273,6 +273,9 @@
* have already installed filters. See the comment at
* MC_CMD_WORKAROUND_BUG26807. */
#define MC_CMD_ERR_FILTERS_PRESENT 0x1014
+/* The clock whose frequency you've attempted to set
+ * doesn't exist on this NIC */
+#define MC_CMD_ERR_NO_CLOCK 0x1015
#define MC_CMD_ERR_CODE_OFST 0
@@ -292,9 +295,11 @@
/* Point to the copycode entry point. */
#define SIENA_MC_BOOTROM_COPYCODE_VEC (0x800 - 3 * 0x4)
#define HUNT_MC_BOOTROM_COPYCODE_VEC (0x8000 - 3 * 0x4)
+#define MEDFORD_MC_BOOTROM_COPYCODE_VEC (0x10000 - 3 * 0x4)
/* Points to the recovery mode entry point. */
#define SIENA_MC_BOOTROM_NOFLASH_VEC (0x800 - 2 * 0x4)
#define HUNT_MC_BOOTROM_NOFLASH_VEC (0x8000 - 2 * 0x4)
+#define MEDFORD_MC_BOOTROM_NOFLASH_VEC (0x10000 - 2 * 0x4)
/* The command set exported by the boot ROM (MCDI v0) */
#define MC_CMD_GET_VERSION_V0_SUPPORTED_FUNCS { \
@@ -686,6 +691,12 @@
#define FCDI_EVENT_CODE_PTP_STATUS 0x9
/* enum: Port id config to map MC-FC port idx */
#define FCDI_EVENT_CODE_PORT_CONFIG 0xa
+/* enum: Boot result or error code */
+#define FCDI_EVENT_CODE_BOOT_RESULT 0xb
+#define FCDI_EVENT_REBOOT_SRC_LBN 36
+#define FCDI_EVENT_REBOOT_SRC_WIDTH 8
+#define FCDI_EVENT_REBOOT_FC_FW 0x0 /* enum */
+#define FCDI_EVENT_REBOOT_FC_BOOTLOADER 0x1 /* enum */
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_OFST 0
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_LBN 0
#define FCDI_EVENT_ASSERT_INSTR_ADDRESS_WIDTH 32
@@ -717,6 +728,11 @@
#define FCDI_EVENT_PORT_CONFIG_DATA_OFST 0
#define FCDI_EVENT_PORT_CONFIG_DATA_LBN 0
#define FCDI_EVENT_PORT_CONFIG_DATA_WIDTH 32
+#define FCDI_EVENT_BOOT_RESULT_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_AOE/MC_CMD_AOE_OUT_INFO/FC_BOOT_RESULT */
+#define FCDI_EVENT_BOOT_RESULT_LBN 0
+#define FCDI_EVENT_BOOT_RESULT_WIDTH 32
/* FCDI_EXTENDED_EVENT_PPS structuredef: Extended FCDI event to send PPS events
 * to the MC. Note that this structure is overlaid on a normal FCDI event
@@ -1649,15 +1665,30 @@
/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS msgresponse */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_LEN 16
-/* Uncorrected error on transmit timestamps in NIC clock format */
+/* Uncorrected error on PTP transmit timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_TRANSMIT_OFST 0
-/* Uncorrected error on receive timestamps in NIC clock format */
+/* Uncorrected error on PTP receive timestamps in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_RECEIVE_OFST 4
/* Uncorrected error on PPS output in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_OUT_OFST 8
/* Uncorrected error on PPS input in NIC clock format */
#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_PPS_IN_OFST 12
+/* MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2 msgresponse */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_LEN 24
+/* Uncorrected error on PTP transmit timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_TX_OFST 0
+/* Uncorrected error on PTP receive timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PTP_RX_OFST 4
+/* Uncorrected error on PPS output in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_OUT_OFST 8
+/* Uncorrected error on PPS input in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_PPS_IN_OFST 12
+/* Uncorrected error on non-PTP transmit timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_TX_OFST 16
+/* Uncorrected error on non-PTP receive timestamps in NIC clock format */
+#define MC_CMD_PTP_OUT_GET_TIMESTAMP_CORRECTIONS_V2_GENERAL_RX_OFST 20
+
/* MC_CMD_PTP_OUT_MANFTEST_PPS msgresponse */
#define MC_CMD_PTP_OUT_MANFTEST_PPS_LEN 4
/* Results of testing */
@@ -2158,8 +2189,12 @@
/* MC_CMD_DRV_ATTACH_IN msgrequest */
#define MC_CMD_DRV_ATTACH_IN_LEN 12
-/* new state (0=detached, 1=attached) to set if UPDATE=1 */
+/* new state to set if UPDATE=1 */
#define MC_CMD_DRV_ATTACH_IN_NEW_STATE_OFST 0
+#define MC_CMD_DRV_ATTACH_LBN 0
+#define MC_CMD_DRV_ATTACH_WIDTH 1
+#define MC_CMD_DRV_PREBOOT_LBN 1
+#define MC_CMD_DRV_PREBOOT_WIDTH 1
/* 1 to set new state, or 0 to just report the existing state */
#define MC_CMD_DRV_ATTACH_IN_UPDATE_OFST 4
/* preferred datapath firmware (for Huntington; ignored for Siena) */
@@ -2181,12 +2216,12 @@
/* MC_CMD_DRV_ATTACH_OUT msgresponse */
#define MC_CMD_DRV_ATTACH_OUT_LEN 4
-/* previous or existing state (0=detached, 1=attached) */
+/* previous or existing state, see the bitmask at NEW_STATE */
#define MC_CMD_DRV_ATTACH_OUT_OLD_STATE_OFST 0
/* MC_CMD_DRV_ATTACH_EXT_OUT msgresponse */
#define MC_CMD_DRV_ATTACH_EXT_OUT_LEN 8
-/* previous or existing state (0=detached, 1=attached) */
+/* previous or existing state, see the bitmask at NEW_STATE */
#define MC_CMD_DRV_ATTACH_EXT_OUT_OLD_STATE_OFST 0
/* Flags associated with this function */
#define MC_CMD_DRV_ATTACH_EXT_OUT_FUNC_FLAGS_OFST 4
@@ -2198,6 +2233,10 @@
#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL 0x1
/* enum: The function can perform privileged operations */
#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_TRUSTED 0x2
+/* enum: The function does not have an active port associated with it. The port
+ * refers to the Sorrento external FPGA port.
+ */
+#define MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT 0x3
/***********************************/
@@ -2892,7 +2931,7 @@
*/
#define MC_CMD_SET_MAC 0x2c
-#define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_LINK
+#define MC_CMD_0x2c_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_SET_MAC_IN msgrequest */
#define MC_CMD_SET_MAC_IN_LEN 28
@@ -2927,9 +2966,66 @@
#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_LBN 0
#define MC_CMD_SET_MAC_IN_FLAG_INCLUDE_FCS_WIDTH 1
+/* MC_CMD_SET_MAC_EXT_IN msgrequest */
+#define MC_CMD_SET_MAC_EXT_IN_LEN 32
+/* The MTU is the MTU programmed directly into the XMAC/GMAC (inclusive of
+ * EtherII, VLAN, bug16011 padding).
+ */
+#define MC_CMD_SET_MAC_EXT_IN_MTU_OFST 0
+#define MC_CMD_SET_MAC_EXT_IN_DRAIN_OFST 4
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_OFST 8
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_LEN 8
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_LO_OFST 8
+#define MC_CMD_SET_MAC_EXT_IN_ADDR_HI_OFST 12
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_OFST 16
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_LBN 0
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_UNCST_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_LBN 1
+#define MC_CMD_SET_MAC_EXT_IN_REJECT_BRDCST_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_FCNTL_OFST 20
+/* enum: Flow control is off. */
+/* MC_CMD_FCNTL_OFF 0x0 */
+/* enum: Respond to flow control. */
+/* MC_CMD_FCNTL_RESPOND 0x1 */
+/* enum: Respond to and issue flow control. */
+/* MC_CMD_FCNTL_BIDIR 0x2 */
+/* enum: Auto neg flow control. */
+/* MC_CMD_FCNTL_AUTO 0x3 */
+/* enum: Priority flow control (eftest builds only). */
+/* MC_CMD_FCNTL_QBB 0x4 */
+/* enum: Issue flow control. */
+/* MC_CMD_FCNTL_GENERATE 0x5 */
+#define MC_CMD_SET_MAC_EXT_IN_FLAGS_OFST 24
+#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_LBN 0
+#define MC_CMD_SET_MAC_EXT_IN_FLAG_INCLUDE_FCS_WIDTH 1
+/* Select which parameters to configure. A parameter will only be modified if
+ * the corresponding control flag is set. If SET_MAC_ENHANCED is not set in
+ * capabilities then this field is ignored (and all flags are assumed to be
+ * set).
+ */
+#define MC_CMD_SET_MAC_EXT_IN_CONTROL_OFST 28
+#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_LBN 0
+#define MC_CMD_SET_MAC_EXT_IN_CFG_MTU_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_LBN 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_DRAIN_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_LBN 2
+#define MC_CMD_SET_MAC_EXT_IN_CFG_REJECT_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_LBN 3
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCNTL_WIDTH 1
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_LBN 4
+#define MC_CMD_SET_MAC_EXT_IN_CFG_FCS_WIDTH 1
+
/* MC_CMD_SET_MAC_OUT msgresponse */
#define MC_CMD_SET_MAC_OUT_LEN 0
+/* MC_CMD_SET_MAC_V2_OUT msgresponse */
+#define MC_CMD_SET_MAC_V2_OUT_LEN 4
+/* MTU as configured after processing the request. See comment at
+ * MC_CMD_SET_MAC_IN/MTU. To query MTU without doing any changes, set CONTROL
+ * to 0.
+ */
+#define MC_CMD_SET_MAC_V2_OUT_MTU_OFST 0
+
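
A hedged sketch of driving the new CONTROL word with the driver's existing MCDI helper macros (example_set_mtu_only() is hypothetical and illustrative, not the driver's SET_MAC path): change only the MTU while leaving every other MAC parameter untouched.

static int example_set_mtu_only(struct efx_nic *efx, unsigned int new_mtu)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_SET_MAC_EXT_IN_LEN);

	/* Only CFG_MTU is set, so firmware consumes just the MTU field */
	MCDI_SET_DWORD(inbuf, SET_MAC_EXT_IN_MTU, new_mtu);
	MCDI_POPULATE_DWORD_1(inbuf, SET_MAC_EXT_IN_CONTROL,
			      SET_MAC_EXT_IN_CFG_MTU, 1);
	return efx_mcdi_rpc(efx, MC_CMD_SET_MAC, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}
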
/***********************************/
/* MC_CMD_PHY_STATS
@@ -3521,6 +3617,26 @@
#define MC_CMD_NVRAM_INFO_OUT_PHYSDEV_OFST 16
#define MC_CMD_NVRAM_INFO_OUT_PHYSADDR_OFST 20
+/* MC_CMD_NVRAM_INFO_V2_OUT msgresponse */
+#define MC_CMD_NVRAM_INFO_V2_OUT_LEN 28
+#define MC_CMD_NVRAM_INFO_V2_OUT_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_INFO_V2_OUT_SIZE_OFST 4
+#define MC_CMD_NVRAM_INFO_V2_OUT_ERASESIZE_OFST 8
+#define MC_CMD_NVRAM_INFO_V2_OUT_FLAGS_OFST 12
+#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_LBN 0
+#define MC_CMD_NVRAM_INFO_V2_OUT_PROTECTED_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_LBN 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_TLV_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_LBN 7
+#define MC_CMD_NVRAM_INFO_V2_OUT_A_B_WIDTH 1
+#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSDEV_OFST 16
+#define MC_CMD_NVRAM_INFO_V2_OUT_PHYSADDR_OFST 20
+/* Writes must be multiples of this size. Added to support the MUM on Sorrento.
+ */
+#define MC_CMD_NVRAM_INFO_V2_OUT_WRITESIZE_OFST 24
+
/***********************************/
/* MC_CMD_NVRAM_UPDATE_START
@@ -3561,6 +3677,37 @@
/* amount to read in bytes */
#define MC_CMD_NVRAM_READ_IN_LENGTH_OFST 8
+/* MC_CMD_NVRAM_READ_IN_V2 msgrequest */
+#define MC_CMD_NVRAM_READ_IN_V2_LEN 16
+#define MC_CMD_NVRAM_READ_IN_V2_TYPE_OFST 0
+/* Enum values, see field(s): */
+/* MC_CMD_NVRAM_TYPES/MC_CMD_NVRAM_TYPES_OUT/TYPES */
+#define MC_CMD_NVRAM_READ_IN_V2_OFFSET_OFST 4
+/* amount to read in bytes */
+#define MC_CMD_NVRAM_READ_IN_V2_LENGTH_OFST 8
+/* Optional control info. If a partition is stored with an A/B versioning
+ * scheme (i.e. in more than one physical partition in NVRAM) the host can set
+ * this to control which underlying physical partition is used to read data
+ * from. This allows it to perform a read-modify-write-verify with the write
+ * lock continuously held by calling NVRAM_UPDATE_START, reading the old
+ * contents using MODE=TARGET_CURRENT, overwriting the old partition and then
+ * verifying by reading with MODE=TARGET_BACKUP.
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_MODE_OFST 12
+/* enum: Same as omitting MODE: caller sees data in current partition unless it
+ * holds the write lock, in which case it sees data in the partition it is
+ * updating.
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_DEFAULT 0x0
+/* enum: Read from the current partition of an A/B pair, even if holding the
+ * write lock.
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT 0x1
+/* enum: Read from the non-current (i.e. to be updated) partition of an A/B
+ * pair
+ */
+#define MC_CMD_NVRAM_READ_IN_V2_TARGET_BACKUP 0x2
+
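
The MODE field exists to support the read-modify-write-verify flow described in the comment above; a hedged outline (the nvram_* and modify_image helper names are hypothetical stand-ins for the corresponding MCDI requests):

static int example_ab_update(struct efx_nic *efx, u32 type,
			     u8 *buf, size_t len)
{
	int rc, rc2;

	rc = nvram_update_start(efx, type);	/* take the write lock */
	if (rc)
		return rc;
	rc = nvram_read_v2(efx, type, 0, buf, len,
			   MC_CMD_NVRAM_READ_IN_V2_TARGET_CURRENT);
	if (!rc) {
		modify_image(buf, len);		/* edit the old contents */
		rc = nvram_write(efx, type, 0, buf, len);
	}
	if (!rc)				/* verify what was written */
		rc = nvram_read_v2(efx, type, 0, buf, len,
				   MC_CMD_NVRAM_READ_IN_V2_TARGET_BACKUP);
	rc2 = nvram_update_finish(efx, type);	/* always drop the lock */
	return rc ? rc : rc2;
}
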
/* MC_CMD_NVRAM_READ_OUT msgresponse */
#define MC_CMD_NVRAM_READ_OUT_LENMIN 1
#define MC_CMD_NVRAM_READ_OUT_LENMAX 252
@@ -3895,6 +4042,8 @@
#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY 0x39
/* enum: CCOM AVREG 1v8 supply (external ADC): mV */
#define MC_CMD_SENSOR_CCOM_AVREG_1V8_SUPPLY_EXTADC 0x3a
+/* enum: CCOM RTS temperature: degC */
+#define MC_CMD_SENSOR_CONTROLLER_RTS 0x3b
/* enum: Not a sensor: reserved for the next page flag */
#define MC_CMD_SENSOR_PAGE1_NEXT 0x3f
/* enum: controller internal temperature sensor voltage on master core
@@ -3931,6 +4080,12 @@
#define MC_CMD_SENSOR_PHY0_VCC 0x4c
/* enum: Voltage supplied to the QSFP #1 from their power supply: mV */
#define MC_CMD_SENSOR_PHY1_VCC 0x4d
+/* enum: Controller die temperature (TDIODE): degC */
+#define MC_CMD_SENSOR_CONTROLLER_TDIODE_TEMP 0x4e
+/* enum: Board temperature (front): degC */
+#define MC_CMD_SENSOR_BOARD_FRONT_TEMP 0x4f
+/* enum: Board temperature (back): degC */
+#define MC_CMD_SENSOR_BOARD_BACK_TEMP 0x50
/* MC_CMD_SENSOR_INFO_ENTRY_TYPEDEF */
#define MC_CMD_SENSOR_ENTRY_OFST 4
#define MC_CMD_SENSOR_ENTRY_LEN 8
@@ -4007,7 +4162,7 @@
/* MC_CMD_READ_SENSORS_EXT_IN msgrequest */
#define MC_CMD_READ_SENSORS_EXT_IN_LEN 12
-/* DMA address of host buffer for sensor readings */
+/* DMA address of host buffer for sensor readings (must be 4Kbyte aligned). */
#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_OFST 0
#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LEN 8
#define MC_CMD_READ_SENSORS_EXT_IN_DMA_ADDR_LO_OFST 0
@@ -4608,6 +4763,10 @@
* operations
*/
#define MC_CMD_MUM_OP_QSFP 0xc
+/* enum: Request discrete and SODIMM DDR info (type, size, speed grade, voltage
+ * level) from MUM
+ */
+#define MC_CMD_MUM_OP_READ_DDR_INFO 0xd
/* MC_CMD_MUM_IN_NULL msgrequest */
#define MC_CMD_MUM_IN_NULL_LEN 4
@@ -4793,6 +4952,10 @@
#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_FLAGS_OFST 8
#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_LBN 0
#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_OVERCLOCK_110_WIDTH 1
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_LBN 1
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_NIC_FROM_FPGA_WIDTH 1
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_LBN 2
+#define MC_CMD_MUM_IN_PROGRAM_CLOCKS_CLOCK_REF_FROM_XO_WIDTH 1
/* MC_CMD_MUM_IN_FPGA_LOAD msgrequest */
#define MC_CMD_MUM_IN_FPGA_LOAD_LEN 8
@@ -4862,6 +5025,11 @@
#define MC_CMD_MUM_IN_QSFP_POLL_BIST_HDR_OFST 4
#define MC_CMD_MUM_IN_QSFP_POLL_BIST_IDX_OFST 8
+/* MC_CMD_MUM_IN_READ_DDR_INFO msgrequest */
+#define MC_CMD_MUM_IN_READ_DDR_INFO_LEN 4
+/* MUM cmd header */
+/* MC_CMD_MUM_IN_CMD_OFST 0 */
+
/* MC_CMD_MUM_OUT msgresponse */
#define MC_CMD_MUM_OUT_LEN 0
@@ -5004,6 +5172,69 @@
#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_LEN 4
#define MC_CMD_MUM_OUT_QSFP_POLL_BIST_TEST_OFST 0
+/* MC_CMD_MUM_OUT_READ_DDR_INFO msgresponse */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMIN 24
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_LENMAX 248
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_LEN(num) (8+8*(num))
+/* Discrete (soldered) DDR resistor strap info */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_DISCRETE_DDR_INFO_OFST 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_LBN 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VRATIO_WIDTH 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_LBN 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED1_WIDTH 16
+/* Number of SODIMM info records */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_RECORDS_OFST 4
+/* Array of SODIMM info records */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LEN 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_LO_OFST 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_HI_OFST 12
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MINNUM 2
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SODIMM_INFO_RECORD_MAXNUM 30
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_LBN 0
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK_ID_WIDTH 8
+/* enum: SODIMM bank 1 (Top SODIMM for Sorrento) */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK1 0x0
+/* enum: SODIMM bank 2 (Bottom SODIMM for Sorrento) */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_BANK2 0x1
+/* enum: Total number of SODIMM banks */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NUM_BANKS 0x2
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_LBN 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_TYPE_WIDTH 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_LBN 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RANK_WIDTH 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_LBN 20
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_VOLTAGE_WIDTH 4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_POWERED 0x0 /* enum */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V25 0x1 /* enum */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V35 0x2 /* enum */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V5 0x3 /* enum */
+/* enum: 1.8V; values 5-15 are reserved for future usage */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_1V8 0x4
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_LBN 24
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SIZE_WIDTH 8
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_LBN 32
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_SPEED_WIDTH 16
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_LBN 48
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_STATE_WIDTH 4
+/* enum: No module present */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_ABSENT 0x0
+/* enum: Module present supported and powered on */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_POWERED 0x1
+/* enum: Module present but bad type */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_TYPE 0x2
+/* enum: Module present but incompatible voltage */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_VOLTAGE 0x3
+/* enum: Module present but unknown SPD */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SPD 0x4
+/* enum: Module present but slot cannot support it */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_PRESENT_BAD_SLOT 0x5
+/* enum: Modules may or may not be present, but cannot establish contact by I2C
+ */
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_NOT_REACHABLE 0x6
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_LBN 52
+#define MC_CMD_MUM_OUT_READ_DDR_INFO_RESERVED2_WIDTH 12
+
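
Each SODIMM info record is a 64-bit word laid out by the _LBN/_WIDTH pairs above; a small hypothetical host-side decoder:

struct ddr_sodimm {
	u8 bank, type, rank, voltage, size, state;
	u16 speed;
};

static void example_decode_sodimm(u64 rec, struct ddr_sodimm *s)
{
	s->bank    = rec & 0xff;		/* BANK_ID:  bits 0-7   */
	s->type    = (rec >> 8) & 0xff;		/* TYPE:     bits 8-15  */
	s->rank    = (rec >> 16) & 0xf;		/* RANK:     bits 16-19 */
	s->voltage = (rec >> 20) & 0xf;		/* VOLTAGE:  bits 20-23 */
	s->size    = (rec >> 24) & 0xff;	/* SIZE:     bits 24-31 */
	s->speed   = (rec >> 32) & 0xffff;	/* SPEED:    bits 32-47 */
	s->state   = (rec >> 48) & 0xf;		/* STATE:    bits 48-51 */
}
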
/* MC_CMD_RESOURCE_SPECIFIER enum */
/* enum: Any */
#define MC_CMD_RESOURCE_INSTANCE_ANY 0xffffffff
@@ -5076,6 +5307,8 @@
#define NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG 0x500
/* enum: Expansion ROM configuration data for port 0 */
#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0 0x600
+/* enum: Synonym for EXPROM_CONFIG_PORT0 as used in pmap files */
+#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG 0x600
/* enum: Expansion ROM configuration data for port 1 */
#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1 0x601
/* enum: Expansion ROM configuration data for port 2 */
@@ -5084,6 +5317,8 @@
#define NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3 0x603
/* enum: Non-volatile log output partition */
#define NVRAM_PARTITION_TYPE_LOG 0x700
+/* enum: Non-volatile log output of second core on dual-core device */
+#define NVRAM_PARTITION_TYPE_LOG_SLAVE 0x701
/* enum: Device state dump output partition */
#define NVRAM_PARTITION_TYPE_DUMP 0x800
/* enum: Application license key storage partition */
@@ -5116,6 +5351,20 @@
#define NVRAM_PARTITION_TYPE_MUM_USER_ROM 0xc05
/* enum: MUM fuses and lockbits partition. */
#define NVRAM_PARTITION_TYPE_MUM_FUSELOCK 0xc06
+/* enum: UEFI expansion ROM if separate from PXE */
+#define NVRAM_PARTITION_TYPE_EXPANSION_UEFI 0xd00
+/* enum: Spare partition 0 */
+#define NVRAM_PARTITION_TYPE_SPARE_0 0x1000
+/* enum: Spare partition 1 */
+#define NVRAM_PARTITION_TYPE_SPARE_1 0x1100
+/* enum: Spare partition 2 */
+#define NVRAM_PARTITION_TYPE_SPARE_2 0x1200
+/* enum: Spare partition 3 */
+#define NVRAM_PARTITION_TYPE_SPARE_3 0x1300
+/* enum: Spare partition 4 */
+#define NVRAM_PARTITION_TYPE_SPARE_4 0x1400
+/* enum: Spare partition 5 */
+#define NVRAM_PARTITION_TYPE_SPARE_5 0x1500
/* enum: Start of reserved value range (firmware may use for any purpose) */
#define NVRAM_PARTITION_TYPE_RESERVED_VALUES_MIN 0xff00
/* enum: End of reserved value range (firmware may use for any purpose) */
@@ -5149,6 +5398,90 @@
#define LICENSED_APP_ID_ID_LBN 0
#define LICENSED_APP_ID_ID_WIDTH 32
+/* LICENSED_FEATURES structuredef */
+#define LICENSED_FEATURES_LEN 8
+/* Bitmask of licensed firmware features */
+#define LICENSED_FEATURES_MASK_OFST 0
+#define LICENSED_FEATURES_MASK_LEN 8
+#define LICENSED_FEATURES_MASK_LO_OFST 0
+#define LICENSED_FEATURES_MASK_HI_OFST 4
+#define LICENSED_FEATURES_RX_CUT_THROUGH_LBN 0
+#define LICENSED_FEATURES_RX_CUT_THROUGH_WIDTH 1
+#define LICENSED_FEATURES_PIO_LBN 1
+#define LICENSED_FEATURES_PIO_WIDTH 1
+#define LICENSED_FEATURES_EVQ_TIMER_LBN 2
+#define LICENSED_FEATURES_EVQ_TIMER_WIDTH 1
+#define LICENSED_FEATURES_CLOCK_LBN 3
+#define LICENSED_FEATURES_CLOCK_WIDTH 1
+#define LICENSED_FEATURES_RX_TIMESTAMPS_LBN 4
+#define LICENSED_FEATURES_RX_TIMESTAMPS_WIDTH 1
+#define LICENSED_FEATURES_TX_TIMESTAMPS_LBN 5
+#define LICENSED_FEATURES_TX_TIMESTAMPS_WIDTH 1
+#define LICENSED_FEATURES_RX_SNIFF_LBN 6
+#define LICENSED_FEATURES_RX_SNIFF_WIDTH 1
+#define LICENSED_FEATURES_TX_SNIFF_LBN 7
+#define LICENSED_FEATURES_TX_SNIFF_WIDTH 1
+#define LICENSED_FEATURES_PROXY_FILTER_OPS_LBN 8
+#define LICENSED_FEATURES_PROXY_FILTER_OPS_WIDTH 1
+#define LICENSED_FEATURES_EVENT_CUT_THROUGH_LBN 9
+#define LICENSED_FEATURES_EVENT_CUT_THROUGH_WIDTH 1
+#define LICENSED_FEATURES_MASK_LBN 0
+#define LICENSED_FEATURES_MASK_WIDTH 64
+
+/* LICENSED_V3_APPS structuredef */
+#define LICENSED_V3_APPS_LEN 8
+/* Bitmask of licensed applications */
+#define LICENSED_V3_APPS_MASK_OFST 0
+#define LICENSED_V3_APPS_MASK_LEN 8
+#define LICENSED_V3_APPS_MASK_LO_OFST 0
+#define LICENSED_V3_APPS_MASK_HI_OFST 4
+#define LICENSED_V3_APPS_ONLOAD_LBN 0
+#define LICENSED_V3_APPS_ONLOAD_WIDTH 1
+#define LICENSED_V3_APPS_PTP_LBN 1
+#define LICENSED_V3_APPS_PTP_WIDTH 1
+#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_LBN 2
+#define LICENSED_V3_APPS_SOLARCAPTURE_PRO_WIDTH 1
+#define LICENSED_V3_APPS_SOLARSECURE_LBN 3
+#define LICENSED_V3_APPS_SOLARSECURE_WIDTH 1
+#define LICENSED_V3_APPS_PERF_MONITOR_LBN 4
+#define LICENSED_V3_APPS_PERF_MONITOR_WIDTH 1
+#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_LBN 5
+#define LICENSED_V3_APPS_SOLARCAPTURE_LIVE_WIDTH 1
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_LBN 6
+#define LICENSED_V3_APPS_CAPTURE_SOLARSYSTEM_WIDTH 1
+#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_LBN 7
+#define LICENSED_V3_APPS_NETWORK_ACCESS_CONTROL_WIDTH 1
+#define LICENSED_V3_APPS_MASK_LBN 0
+#define LICENSED_V3_APPS_MASK_WIDTH 64
+
+/* LICENSED_V3_FEATURES structuredef */
+#define LICENSED_V3_FEATURES_LEN 8
+/* Bitmask of licensed firmware features */
+#define LICENSED_V3_FEATURES_MASK_OFST 0
+#define LICENSED_V3_FEATURES_MASK_LEN 8
+#define LICENSED_V3_FEATURES_MASK_LO_OFST 0
+#define LICENSED_V3_FEATURES_MASK_HI_OFST 4
+#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_LBN 0
+#define LICENSED_V3_FEATURES_RX_CUT_THROUGH_WIDTH 1
+#define LICENSED_V3_FEATURES_PIO_LBN 1
+#define LICENSED_V3_FEATURES_PIO_WIDTH 1
+#define LICENSED_V3_FEATURES_EVQ_TIMER_LBN 2
+#define LICENSED_V3_FEATURES_EVQ_TIMER_WIDTH 1
+#define LICENSED_V3_FEATURES_CLOCK_LBN 3
+#define LICENSED_V3_FEATURES_CLOCK_WIDTH 1
+#define LICENSED_V3_FEATURES_RX_TIMESTAMPS_LBN 4
+#define LICENSED_V3_FEATURES_RX_TIMESTAMPS_WIDTH 1
+#define LICENSED_V3_FEATURES_TX_TIMESTAMPS_LBN 5
+#define LICENSED_V3_FEATURES_TX_TIMESTAMPS_WIDTH 1
+#define LICENSED_V3_FEATURES_RX_SNIFF_LBN 6
+#define LICENSED_V3_FEATURES_RX_SNIFF_WIDTH 1
+#define LICENSED_V3_FEATURES_TX_SNIFF_LBN 7
+#define LICENSED_V3_FEATURES_TX_SNIFF_WIDTH 1
+#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_LBN 8
+#define LICENSED_V3_FEATURES_PROXY_FILTER_OPS_WIDTH 1
+#define LICENSED_V3_FEATURES_MASK_LBN 0
+#define LICENSED_V3_FEATURES_MASK_WIDTH 64
+
/* TX_TIMESTAMP_EVENT structuredef */
#define TX_TIMESTAMP_EVENT_LEN 6
/* lower 16 bits of timestamp data */
@@ -5258,6 +5591,8 @@
#define MC_CMD_INIT_EVQ_IN_FLAG_RX_MERGE_WIDTH 1
#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_LBN 5
#define MC_CMD_INIT_EVQ_IN_FLAG_TX_MERGE_WIDTH 1
+#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_LBN 6
+#define MC_CMD_INIT_EVQ_IN_FLAG_USE_TIMER_WIDTH 1
#define MC_CMD_INIT_EVQ_IN_TMR_MODE_OFST 20
/* enum: Disabled */
#define MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS 0x0
@@ -5362,6 +5697,8 @@
#define MC_CMD_INIT_RXQ_IN_FLAG_PREFIX_WIDTH 1
#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_LBN 9
#define MC_CMD_INIT_RXQ_IN_FLAG_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_INIT_RXQ_IN_FLAG_FORCE_EV_MERGING_LBN 10
+#define MC_CMD_INIT_RXQ_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_IN_OWNER_ID_OFST 20
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
@@ -5422,6 +5759,8 @@
#define MC_CMD_INIT_RXQ_EXT_IN_PS_BUFF_64K 0x4 /* enum */
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_LBN 18
#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES_WIDTH 1
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_LBN 19
+#define MC_CMD_INIT_RXQ_EXT_IN_FLAG_FORCE_EV_MERGING_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_RXQ_EXT_IN_OWNER_ID_OFST 20
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
@@ -5535,6 +5874,8 @@
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN_WIDTH 1
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_LBN 11
#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN_WIDTH 1
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_LBN 12
+#define MC_CMD_INIT_TXQ_EXT_IN_FLAG_TSOV2_EN_WIDTH 1
/* Owner ID to use if in buffer mode (zero if physical) */
#define MC_CMD_INIT_TXQ_EXT_IN_OWNER_ID_OFST 20
/* The port ID associated with the v-adaptor which should contain this DMAQ. */
@@ -5747,6 +6088,46 @@
#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_OFST 44
#define MC_CMD_PROXY_CONFIGURE_IN_ALLOWED_MCDI_MASK_LEN 64
+/* MC_CMD_PROXY_CONFIGURE_EXT_IN msgrequest */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_LEN 112
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_FLAGS_OFST 0
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_LBN 0
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ENABLE_WIDTH 1
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size STATUS_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_LO_OFST 4
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BUFF_ADDR_HI_OFST 8
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_STATUS_BLOCK_SIZE_OFST 12
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size REQUEST_BLOCK_SIZE.
+ */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_LO_OFST 16
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BUFF_ADDR_HI_OFST 20
+/* Must be a power of 2 */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REQUEST_BLOCK_SIZE_OFST 24
+/* Host provides a contiguous memory buffer that contains at least NUM_BLOCKS
+ * of blocks, each of the size REPLY_BLOCK_SIZE. This buffer is only needed if
+ * the host intends to complete proxied operations by using MC_CMD_PROXY_CMD.
+ */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LEN 8
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_LO_OFST 28
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BUFF_ADDR_HI_OFST 32
+/* Must be a power of 2, or zero if this buffer is not provided */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_REPLY_BLOCK_SIZE_OFST 36
+/* Applies to all three buffers */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_NUM_BLOCKS_OFST 40
+/* A bit mask defining which MCDI operations may be proxied */
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_OFST 44
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_ALLOWED_MCDI_MASK_LEN 64
+#define MC_CMD_PROXY_CONFIGURE_EXT_IN_RESERVED_OFST 108
+
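
A small sketch of the size constraints spelled out above (example_proxy_sizes_ok() is hypothetical; is_power_of_2() is from linux/log2.h). NUM_BLOCKS applies to all three buffers, and the reply buffer alone may be omitted:

static bool example_proxy_sizes_ok(u32 status_sz, u32 request_sz,
				   u32 reply_sz)
{
	return is_power_of_2(status_sz) &&
	       is_power_of_2(request_sz) &&
	       (reply_sz == 0 || is_power_of_2(reply_sz));
}
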
/* MC_CMD_PROXY_CONFIGURE_OUT msgresponse */
#define MC_CMD_PROXY_CONFIGURE_OUT_LEN 0
@@ -6323,6 +6704,15 @@
* client
*/
#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_RESTRICTIONS 0x2
+/* enum: read properties relating to security rules (Medford-only; for use by
+ * SolarSecure apps, not directly by drivers. See SF-114946-SW.)
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SECURITY_RULE_INFO 0x3
+/* enum: read the list of supported RX filter matches for VXLAN/NVGRE
+ * encapsulated frames, which follow a different match sequence to normal
+ * frames (Medford only)
+ */
+#define MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES 0x4
/* MC_CMD_GET_PARSER_DISP_INFO_OUT msgresponse */
#define MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMIN 8
@@ -6356,7 +6746,10 @@
/***********************************/
/* MC_CMD_PARSER_DISP_RW
- * Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging
+ * Direct read/write of parser-dispatcher state (DICPUs and LUE) for debugging.
+ * Please note that this interface is only of use to debug tools which have
+ * knowledge of firmware and hardware data structures; nothing here is intended
+ * for use by normal driver code.
*/
#define MC_CMD_PARSER_DISP_RW 0xe5
@@ -6374,6 +6767,12 @@
#define MC_CMD_PARSER_DISP_RW_IN_LUE 0x2
/* enum: Lookup engine (with requested metadata format) */
#define MC_CMD_PARSER_DISP_RW_IN_LUE_VERSIONED_METADATA 0x3
+/* enum: RX0 dispatcher CPU (alias for RX_DICPU; Medford has 2 RX DICPUs) */
+#define MC_CMD_PARSER_DISP_RW_IN_RX0_DICPU 0x0
+/* enum: RX1 dispatcher CPU (only valid for Medford) */
+#define MC_CMD_PARSER_DISP_RW_IN_RX1_DICPU 0x4
+/* enum: Miscellaneous other state (only valid for Medford) */
+#define MC_CMD_PARSER_DISP_RW_IN_MISC_STATE 0x5
/* identifies the type of operation requested */
#define MC_CMD_PARSER_DISP_RW_IN_OP_OFST 4
/* enum: read a word of DICPU DMEM or a LUE entry */
@@ -6382,8 +6781,12 @@
#define MC_CMD_PARSER_DISP_RW_IN_WRITE 0x1
/* enum: read-modify-write a word of DICPU DMEM (not valid for LUE) */
#define MC_CMD_PARSER_DISP_RW_IN_RMW 0x2
-/* data memory address or LUE index */
+/* data memory address (DICPU targets) or LUE index (LUE targets) */
#define MC_CMD_PARSER_DISP_RW_IN_ADDRESS_OFST 8
+/* selector (for MISC_STATE target) */
+#define MC_CMD_PARSER_DISP_RW_IN_SELECTOR_OFST 8
+/* enum: Port to datapath mapping */
+#define MC_CMD_PARSER_DISP_RW_IN_PORT_DP_MAPPING 0x1
/* value to write (for DMEM writes) */
#define MC_CMD_PARSER_DISP_RW_IN_DMEM_WRITE_VALUE_OFST 12
/* XOR value (for DMEM read-modify-writes: new = (old & mask) ^ value) */
@@ -6408,6 +6811,12 @@
*/
#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_OFST 20
#define MC_CMD_PARSER_DISP_RW_OUT_LUE_MGR_STATE_LEN 32
+/* datapath(s) used for each port (for MISC_STATE PORT_DP_MAPPING selector) */
+#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_OFST 0
+#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_LEN 4
+#define MC_CMD_PARSER_DISP_RW_OUT_PORT_DP_MAPPING_NUM 4
+#define MC_CMD_PARSER_DISP_RW_OUT_DP0 0x1 /* enum */
+#define MC_CMD_PARSER_DISP_RW_OUT_DP1 0x2 /* enum */
/***********************************/
@@ -7071,6 +7480,24 @@
#define MC_CMD_GET_CAPABILITIES_OUT_LEN 20
/* First word of flags. */
#define MC_CMD_GET_CAPABILITIES_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
#define MC_CMD_GET_CAPABILITIES_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_ADDITIONAL_RSS_MODES_LBN 13
@@ -7138,6 +7565,8 @@
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
/* enum: RXDP Test firmware image 8 */
#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
/* TxDPCPU firmware id. */
#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_OFST 6
#define MC_CMD_GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID_LEN 2
@@ -7153,6 +7582,8 @@
#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
/* enum: TXDP Test firmware image 2 */
#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_OUT_TXDP_TEST_FW_CSR 0x103
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_OFST 8
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_LEN 2
#define MC_CMD_GET_CAPABILITIES_OUT_RXPD_FW_VERSION_REV_LBN 0
@@ -7227,6 +7658,258 @@
/* Licensed capabilities */
#define MC_CMD_GET_CAPABILITIES_OUT_LICENSE_CAPABILITIES_OFST 16
+/* MC_CMD_GET_CAPABILITIES_V2_IN msgrequest */
+#define MC_CMD_GET_CAPABILITIES_V2_IN_LEN 0
+
+/* MC_CMD_GET_CAPABILITIES_V2_OUT msgresponse */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_LEN 72
+/* First word of flags. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS1_OFST 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VPORT_RECONFIGURE_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_STRIPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_LBN 5
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_QUERY_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_LBN 6
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_PORT_VLAN_RESTRICT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_LBN 7
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_DRV_ATTACH_PREBOOT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_LBN 8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_FORCE_EVENT_MERGING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_LBN 9
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SET_MAC_ENHANCED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_LBN 10
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_UNKNOWN_UCAST_DST_FILTER_ALWAYS_MULTI_RECIPIENT_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 11
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MAC_SECURITY_FILTERING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_LBN 13
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_ADDITIONAL_RSS_MODES_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_LBN 14
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_QBB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_LBN 15
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_VAR_BUFFERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_LBN 16
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_RSS_LIMITED_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_LBN 17
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PACKED_STREAM_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_LBN 18
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_INCLUDE_FCS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_LBN 19
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_VLAN_INSERTION_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_LBN 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_VLAN_STRIPPING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_LBN 21
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_LBN 22
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_0_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_LBN 23
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_PREFIX_LEN_14_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_LBN 24
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_TIMESTAMP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_LBN 25
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_BATCHING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_LBN 26
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_MCAST_FILTER_CHAINING_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_LBN 27
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_LBN 28
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DISABLE_SCATTER_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_LBN 29
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_MCAST_UDP_LOOPBACK_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVB_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_LBN 31
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_VXLAN_NVGRE_WIDTH 1
+/* RxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_OFST 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DPCPU_FW_ID_LEN 2
+/* enum: Standard RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP 0x0
+/* enum: Low latency RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_LOW_LATENCY 0x1
+/* enum: Packed stream RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_PACKED_STREAM 0x2
+/* enum: BIST RXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_BIST 0x10a
+/* enum: RXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_CUT_THROUGH 0x101
+/* enum: RXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD 0x102
+/* enum: RXDP Test firmware image 3 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_TO_MC_STORE_FORWARD_FIRST 0x103
+/* enum: RXDP Test firmware image 4 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_EVERY_EVENT_BATCHABLE 0x104
+/* enum: RXDP Test firmware image 5 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_BACKPRESSURE 0x105
+/* enum: RXDP Test firmware image 6 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_PACKET_EDITS 0x106
+/* enum: RXDP Test firmware image 7 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_RX_HDR_SPLIT 0x107
+/* enum: RXDP Test firmware image 8 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DISABLE_DL 0x108
+/* enum: RXDP Test firmware image 9 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXDP_TEST_FW_DOORBELL_DELAY 0x10b
+/* TxDPCPU firmware id. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_OFST 6
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DPCPU_FW_ID_LEN 2
+/* enum: Standard TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP 0x0
+/* enum: Low latency TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_LOW_LATENCY 0x1
+/* enum: High packet rate TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_HIGH_PACKET_RATE 0x3
+/* enum: BIST TXDP firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_BIST 0x12d
+/* enum: TXDP Test firmware image 1 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_TSO_EDIT 0x101
+/* enum: TXDP Test firmware image 2 */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_PACKET_EDITS 0x102
+/* enum: TXDP CSR bus test firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXDP_TEST_FW_CSR 0x103
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_OFST 8
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial RX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: RX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Virtual switching (full feature) RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant RX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+/* enum: Low latency RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LOW_LATENCY 0x5
+/* enum: Packed stream RX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_PACKED_STREAM 0x6
+/* enum: RX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* enum: RX PD firmware parsing but not filtering network overlay tunnel
+ * encapsulations (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RXPD_FW_TYPE_TESTFW_ENCAP_PARSING_ONLY 0xf
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_OFST 10
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_REV_WIDTH 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_LBN 12
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_VERSION_TYPE_WIDTH 4
+/* enum: reserved value - do not use (may indicate alternative interpretation
+ * of REV field in future)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_RESERVED 0x0
+/* enum: Trivial TX PD firmware for early Huntington development (Huntington
+ * development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_FIRST_PKT 0x1
+/* enum: TX PD firmware with approximately Siena-compatible behaviour
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT 0x2
+/* enum: Virtual switching (full feature) TX PD production firmware */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_VSWITCH 0x3
+/* enum: siena_compat variant TX PD firmware using PM rather than MAC
+ * (Huntington development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_SIENA_COMPAT_PM 0x4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LOW_LATENCY 0x5 /* enum */
+/* enum: TX PD firmware handling layer 2 only for high packet rate performance
+ * tests (Medford development only)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_LAYER2_PERF 0x7
+/* enum: RX PD firmware for GUE parsing prototype (Medford development only) */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TXPD_FW_TYPE_TESTFW_GUE_PROTOTYPE 0xe
+/* Hardware capabilities of NIC */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_HW_CAPABILITIES_OFST 12
+/* Licensed capabilities */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_LICENSE_CAPABILITIES_OFST 16
+/* Second word of flags. Not present on older firmware (check the length). */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_FLAGS2_OFST 20
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN 0
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_LBN 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_ENCAP_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_LBN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVQ_TIMER_CTRL_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_LBN 3
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_EVENT_CUT_THROUGH_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_LBN 4
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_CUT_THROUGH_WIDTH 1
+/* Number of FATSOv2 contexts per datapath supported by this NIC. Not present
+ * on older firmware (check the length).
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_OFST 24
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS_LEN 2
+/* One byte per PF containing the number of the external port assigned to this
+ * PF, indexed by PF number. Special values indicate that a PF is either not
+ * present or not assigned.
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_OFST 26
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PFS_TO_PORTS_ASSIGNMENT_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff
+/* enum: PF does not exist. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe
+/* enum: PF does exist but is not assigned to any external port. */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_ASSIGNED 0xfd
+/* enum: This value indicates that PF is assigned, but it cannot be expressed
+ * in this field. It is intended for a possible future situation where a more
+ * complex scheme of PFs to ports mapping is being used. The future driver
+ * should look for a new field supporting the new scheme. The current/old
+ * driver should treat this value as PF_NOT_ASSIGNED.
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_INCOMPATIBLE_ASSIGNMENT 0xfc
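
A hypothetical decode of one PFS_TO_PORTS_ASSIGNMENT byte, treating INCOMPATIBLE_ASSIGNMENT like PF_NOT_ASSIGNED as the comment above instructs current drivers to do:

static bool example_pf_has_port(u8 assignment)
{
	switch (assignment) {
	case MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED:
	case MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT:
	case MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_ASSIGNED:
	case MC_CMD_GET_CAPABILITIES_V2_OUT_INCOMPATIBLE_ASSIGNMENT:
		return false;
	default:
		return true;	/* byte holds the external port number */
	}
}
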
+/* One byte per PF containing the number of its VFs, indexed by PF number. A
+ * special value indicates that a PF is not present.
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_OFST 42
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_LEN 1
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VFS_PER_PF_NUM 16
+/* enum: The caller is not permitted to access information on this PF. */
+/* MC_CMD_GET_CAPABILITIES_V2_OUT_ACCESS_NOT_PERMITTED 0xff */
+/* enum: PF does not exist. */
+/* MC_CMD_GET_CAPABILITIES_V2_OUT_PF_NOT_PRESENT 0xfe */
+/* Number of VIs available for each external port */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_OFST 58
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_LEN 2
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_VIS_PER_PORT_NUM 4
+/* Size of RX descriptor cache expressed as a binary logarithm. The actual size
+ * equals (2 ^ RX_DESC_CACHE_SIZE)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_OFST 66
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_RX_DESC_CACHE_SIZE_LEN 1
+/* Size of TX descriptor cache expressed as a binary logarithm. The actual size
+ * equals (2 ^ TX_DESC_CACHE_SIZE)
+ */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_OFST 67
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_TX_DESC_CACHE_SIZE_LEN 1
+/* Total number of available PIO buffers */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_OFST 68
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_NUM_PIO_BUFFS_LEN 2
+/* Size of a single PIO buffer */
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_OFST 70
+#define MC_CMD_GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF_LEN 2
+
/***********************************/
/* MC_CMD_V2_EXTN
@@ -7475,6 +8158,25 @@
/***********************************/
+/* MC_CMD_VSWITCH_QUERY
+ * read some config of a v-switch. For now this command is an empty placeholder.
+ * It may be used to check if a v-switch is connected to a given EVB port (if
+ * not, then the command returns ENOENT).
+ */
+#define MC_CMD_VSWITCH_QUERY 0x63
+
+#define MC_CMD_0x63_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VSWITCH_QUERY_IN msgrequest */
+#define MC_CMD_VSWITCH_QUERY_IN_LEN 4
+/* The port to which the v-switch is connected. */
+#define MC_CMD_VSWITCH_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VSWITCH_QUERY_OUT msgresponse */
+#define MC_CMD_VSWITCH_QUERY_OUT_LEN 0
+
+
+/***********************************/
/* MC_CMD_VPORT_ALLOC
* allocate a v-port.
*/
@@ -7510,6 +8212,8 @@
#define MC_CMD_VPORT_ALLOC_IN_FLAGS_OFST 8
#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_LBN 0
#define MC_CMD_VPORT_ALLOC_IN_FLAG_AUTO_PORT_WIDTH 1
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_LBN 1
+#define MC_CMD_VPORT_ALLOC_IN_FLAG_VLAN_RESTRICT_WIDTH 1
/* The number of VLAN tags to insert/remove. An error will be returned if
* incompatible with the number of VLAN tags specified for the upstream
* v-switch.
@@ -7561,6 +8265,8 @@
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAGS_OFST 8
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_LBN 0
#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_AUTO_VADAPTOR_WIDTH 1
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_LBN 1
+#define MC_CMD_VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED_WIDTH 1
/* The number of VLAN tags to strip on receive */
#define MC_CMD_VADAPTOR_ALLOC_IN_NUM_VLANS_OFST 12
/* The number of VLAN tags to transparently insert/remove. */
@@ -7639,6 +8345,29 @@
/***********************************/
+/* MC_CMD_VADAPTOR_QUERY
+ * read some config of a v-adaptor.
+ */
+#define MC_CMD_VADAPTOR_QUERY 0x61
+
+#define MC_CMD_0x61_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VADAPTOR_QUERY_IN msgrequest */
+#define MC_CMD_VADAPTOR_QUERY_IN_LEN 4
+/* The port to which the v-adaptor is connected. */
+#define MC_CMD_VADAPTOR_QUERY_IN_UPSTREAM_PORT_ID_OFST 0
+
+/* MC_CMD_VADAPTOR_QUERY_OUT msgresponse */
+#define MC_CMD_VADAPTOR_QUERY_OUT_LEN 12
+/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
+#define MC_CMD_VADAPTOR_QUERY_OUT_PORT_FLAGS_OFST 0
+/* The v-adaptor flags as defined at MC_CMD_VADAPTOR_ALLOC. */
+#define MC_CMD_VADAPTOR_QUERY_OUT_VADAPTOR_FLAGS_OFST 4
+/* The number of VLAN tags that may still be added */
+#define MC_CMD_VADAPTOR_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 8
+
+
+/***********************************/
/* MC_CMD_EVB_PORT_ASSIGN
* assign a port to a PCI function.
*/
@@ -7875,10 +8604,17 @@
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN 8
/* The handle of the RSS context */
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_RSS_CONTEXT_ID_OFST 0
-/* Hash control flags. The _EN bits are always supported. The _MODE bits only
- * work when the firmware reports ADDITIONAL_RSS_MODES in
- * MC_CMD_GET_CAPABILITIES and override the _EN bits if any of them are not 0.
- * See the RSS_MODE structure for the meaning of the mode bits.
+/* Hash control flags. The _EN bits are always supported, but new modes are
+ * available when ADDITIONAL_RSS_MODES is reported by MC_CMD_GET_CAPABILITIES:
+ * in this case, the MODE fields may be set to non-zero values, and will take
+ * effect regardless of the settings of the _EN flags. See the RSS_MODE
+ * structure for the meaning of the mode bits. Drivers must check the
+ * capability before trying to set any _MODE fields, as older firmware will
+ * reject any attempt to set the FLAGS field to a value > 0xff with EINVAL. In
+ * the case where all the _MODE flags are zero, the _EN flags take effect,
+ * providing backward compatibility for existing drivers. (Setting all _MODE
+ * *and* all _EN flags to zero is valid, to disable RSS spreading for that
+ * particular packet type.)
*/
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_FLAGS_OFST 4
#define MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN 0
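
A hedged sketch of the capability gating the comment requires (additional_rss_modes is an illustrative flag, not a struct field; the _MODE field name follows the full header, e.g. ..._TCP_IPV4_RSS_MODE_LBN):

static u32 example_rss_flags(bool additional_rss_modes, u32 tcp_ipv4_mode)
{
	/* _EN bits are always accepted by firmware */
	u32 flags = 1 << MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TOEPLITZ_IPV4_EN_LBN;

	/* Older firmware rejects FLAGS > 0xff with EINVAL, so only set
	 * _MODE fields when ADDITIONAL_RSS_MODES is reported.
	 */
	if (additional_rss_modes)
		flags |= tcp_ipv4_mode <<
			 MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_TCP_IPV4_RSS_MODE_LBN;
	return flags;
}
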
@@ -7923,11 +8659,18 @@
/* MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT msgresponse */
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_LEN 8
-/* Hash control flags. If any _MODE bits are non-zero (which will only be true
- * when the firmware reports ADDITIONAL_RSS_MODES) then the _EN bits should be
- * disregarded (but are guaranteed to be consistent with the _MODE bits if
- * RSS_CONTEXT_SET_FLAGS has never been called for this context since it was
- * allocated).
+/* Hash control flags. If all _MODE bits are zero (which will always be true
+ * for older firmware which does not report the ADDITIONAL_RSS_MODES
+ * capability), the _EN bits report the state. If any _MODE bits are non-zero
+ * (which will only be true when the firmware reports ADDITIONAL_RSS_MODES)
+ * then the _EN bits should be disregarded, although the _MODE flags are
+ * guaranteed to be consistent with the _EN flags for a freshly-allocated RSS
+ * context and in the case where the _EN flags were used in the SET. This
+ * provides backward compatibility: old drivers will not be attempting to
+ * derive any meaning from the _MODE bits (and can never set them to any value
+ * not representable by the _EN bits); new drivers can always determine the
+ * mode by looking only at the _MODE bits; the value returned by a GET can
+ * always be used for a SET regardless of old/new driver vs. old/new firmware.
*/
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_FLAGS_OFST 4
#define MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_TOEPLITZ_IPV4_EN_LBN 0
@@ -8155,6 +8898,74 @@
/***********************************/
+/* MC_CMD_VPORT_RECONFIGURE
+ * Replace VLAN tags and/or MAC addresses of an existing v-port. If the v-port
+ * has already been passed to another function (v-port's user), then that
+ * function will be reset before applying the changes.
+ */
+#define MC_CMD_VPORT_RECONFIGURE 0xeb
+
+#define MC_CMD_0xeb_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_VPORT_RECONFIGURE_IN msgrequest */
+#define MC_CMD_VPORT_RECONFIGURE_IN_LEN 44
+/* The handle of the v-port */
+#define MC_CMD_VPORT_RECONFIGURE_IN_VPORT_ID_OFST 0
+/* Flags requesting what should be changed. */
+#define MC_CMD_VPORT_RECONFIGURE_IN_FLAGS_OFST 4
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_LBN 0
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_VLAN_TAGS_WIDTH 1
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_LBN 1
+#define MC_CMD_VPORT_RECONFIGURE_IN_REPLACE_MACADDRS_WIDTH 1
+/* The number of VLAN tags to insert/remove. An error will be returned if
+ * incompatible with the number of VLAN tags specified for the upstream
+ * v-switch.
+ */
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_VLAN_TAGS_OFST 8
+/* The actual VLAN tags to insert/remove */
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAGS_OFST 12
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_LBN 0
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_0_WIDTH 16
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_LBN 16
+#define MC_CMD_VPORT_RECONFIGURE_IN_VLAN_TAG_1_WIDTH 16
+/* The number of MAC addresses to add */
+#define MC_CMD_VPORT_RECONFIGURE_IN_NUM_MACADDRS_OFST 16
+/* MAC addresses to add */
+#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_OFST 20
+#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_LEN 6
+#define MC_CMD_VPORT_RECONFIGURE_IN_MACADDRS_NUM 4
+
+/* MC_CMD_VPORT_RECONFIGURE_OUT msgresponse */
+#define MC_CMD_VPORT_RECONFIGURE_OUT_LEN 4
+#define MC_CMD_VPORT_RECONFIGURE_OUT_FLAGS_OFST 0
+#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_LBN 0
+#define MC_CMD_VPORT_RECONFIGURE_OUT_RESET_DONE_WIDTH 1
+
+
+/***********************************/
+/* MC_CMD_EVB_PORT_QUERY
+ * read some config of a v-port.
+ */
+#define MC_CMD_EVB_PORT_QUERY 0x62
+
+#define MC_CMD_0x62_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_EVB_PORT_QUERY_IN msgrequest */
+#define MC_CMD_EVB_PORT_QUERY_IN_LEN 4
+/* The handle of the v-port */
+#define MC_CMD_EVB_PORT_QUERY_IN_PORT_ID_OFST 0
+
+/* MC_CMD_EVB_PORT_QUERY_OUT msgresponse */
+#define MC_CMD_EVB_PORT_QUERY_OUT_LEN 8
+/* The EVB port flags as defined at MC_CMD_VPORT_ALLOC. */
+#define MC_CMD_EVB_PORT_QUERY_OUT_PORT_FLAGS_OFST 0
+/* The number of VLAN tags that may be used on a v-adaptor connected to this
+ * EVB port.
+ */
+#define MC_CMD_EVB_PORT_QUERY_OUT_NUM_AVAILABLE_VLAN_TAGS_OFST 4
+
+
+/***********************************/
/* MC_CMD_DUMP_BUFTBL_ENTRIES
* Dump buffer table entries, mainly for command client debug use. Dumps
* absolute entries, and does not use chunk handles. All entries must be in
@@ -8196,6 +9007,14 @@
#define MC_CMD_SET_RXDP_CONFIG_IN_DATA_OFST 0
#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_LBN 0
#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_DMA_WIDTH 1
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_LBN 1
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_LEN_WIDTH 2
+/* enum: pad to 64 bytes */
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64 0x0
+/* enum: pad to 128 bytes (Medford only) */
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128 0x1
+/* enum: pad to 256 bytes (Medford only) */
+#define MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256 0x2
/* MC_CMD_SET_RXDP_CONFIG_OUT msgresponse */
#define MC_CMD_SET_RXDP_CONFIG_OUT_LEN 0
@@ -8217,6 +9036,10 @@
#define MC_CMD_GET_RXDP_CONFIG_OUT_DATA_OFST 0
#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_LBN 0
#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_DMA_WIDTH 1
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_LBN 1
+#define MC_CMD_GET_RXDP_CONFIG_OUT_PAD_HOST_LEN_WIDTH 2
+/* Enum values, see field(s): */
+/* MC_CMD_SET_RXDP_CONFIG/MC_CMD_SET_RXDP_CONFIG_IN/PAD_HOST_LEN */
/***********************************/
@@ -8788,32 +9611,38 @@
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_MAXNUM 63
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_LBN 0
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_ID_WIDTH 8
-/* enum: Attenuation (0-15, TBD for Medford) */
+/* enum: Attenuation (0-15, Huntington) */
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_ATT 0x0
-/* enum: CTLE Boost (0-15, TBD for Medford) */
+/* enum: CTLE Boost (0-15, Huntington) */
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_BOOST 0x1
-/* enum: Edge DFE Tap1 (0 - max negative, 64 - zero, 127 - max positive, TBD
- * for Medford)
+/* enum: Edge DFE Tap1 (Huntington - 0 - max negative, 64 - zero, 127 - max
+ * positive, Medford - 0-31)
*/
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP1 0x2
-/* enum: Edge DFE Tap2 (0 - max negative, 32 - zero, 63 - max positive, TBD for
- * Medford)
+/* enum: Edge DFE Tap2 (Huntington - 0 - max negative, 32 - zero, 63 - max
+ * positive, Medford - 0-31)
*/
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP2 0x3
-/* enum: Edge DFE Tap3 (0 - max negative, 32 - zero, 63 - max positive, TBD for
- * Medford)
+/* enum: Edge DFE Tap3 (Huntington - 0 - max negative, 32 - zero, 63 - max
+ * positive, Medford - 0-16)
*/
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP3 0x4
-/* enum: Edge DFE Tap4 (0 - max negative, 32 - zero, 63 - max positive, TBD for
- * Medford)
+/* enum: Edge DFE Tap4 (Huntington - 0 - max negative, 32 - zero, 63 - max
+ * positive, Medford - 0-16)
*/
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP4 0x5
-/* enum: Edge DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive, TBD for
- * Medford)
+/* enum: Edge DFE Tap5 (Huntington - 0 - max negative, 32 - zero, 63 - max
+ * positive, Medford - 0-16)
*/
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_TAP5 0x6
-/* enum: Edge DFE DLEV (TBD for Medford) */
+/* enum: Edge DFE DLEV (0-128 for Medford) */
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_EDFE_DLEV 0x7
+/* enum: Variable Gain Amplifier (0-15, Medford) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_VGA 0x8
+/* enum: CTLE EQ Capacitor (0-15, Medford) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9
+/* enum: CTLE EQ Resistor (0-7, Medford) */
+#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 3
#define MC_CMD_KR_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
@@ -8885,26 +9714,32 @@
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_MAXNUM 63
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_LBN 0
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_ID_WIDTH 8
-/* enum: TX Amplitude */
+/* enum: TX Amplitude (Huntington, Medford) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV 0x0
-/* enum: De-Emphasis Tap1 Magnitude (0-7) */
+/* enum: De-Emphasis Tap1 Magnitude (0-7) (Huntington) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_MODE 0x1
/* enum: De-Emphasis Tap1 Fine */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_DTLEV 0x2
-/* enum: De-Emphasis Tap2 Magnitude (0-6) */
+/* enum: De-Emphasis Tap2 Magnitude (0-6) (Huntington) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2 0x3
-/* enum: De-Emphasis Tap2 Fine */
+/* enum: De-Emphasis Tap2 Fine (Huntington) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_D2TLEV 0x4
-/* enum: Pre-Emphasis Magnitude */
+/* enum: Pre-Emphasis Magnitude (Huntington) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_E 0x5
-/* enum: Pre-Emphasis Fine */
+/* enum: Pre-Emphasis Fine (Huntington) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_ETLEV 0x6
-/* enum: TX Slew Rate Coarse control */
+/* enum: TX Slew Rate Coarse control (Huntington) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_PREDRV_DLY 0x7
-/* enum: TX Slew Rate Fine control */
+/* enum: TX Slew Rate Fine control (Huntington) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_SR_SET 0x8
-/* enum: TX Termination Impedance control */
+/* enum: TX Termination Impedance control (Huntington) */
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_RT_SET 0x9
+/* enum: TX Amplitude Fine control (Medford) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TX_LEV_FINE 0xa
+/* enum: Pre-shoot Tap (Medford) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_ADV 0xb
+/* enum: De-emphasis Tap (Medford) */
+#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_TAP_DLY 0xc
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_LBN 8
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_PARAM_LANE_WIDTH 3
#define MC_CMD_KR_TUNE_TXEQ_GET_OUT_LANE_0 0x0 /* enum */
@@ -9086,8 +9921,16 @@
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP4 0x5
/* enum: DFE Tap5 (0 - max negative, 32 - zero, 63 - max positive) */
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_TAP5 0x6
+/* enum: DFE DLev */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_DFE_DLEV 0x7
+/* enum: Figure of Merit */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_FOM 0x8
+/* enum: CTLE EQ Capacitor (HF Gain) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQC 0x9
+/* enum: CTLE EQ Resistor (DC Gain) */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQRES 0xa
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_LBN 8
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 4
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_LANE_WIDTH 5
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_0 0x0 /* enum */
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_1 0x1 /* enum */
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 0x2 /* enum */
@@ -9096,12 +9939,57 @@
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_5 0x5 /* enum */
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_6 0x6 /* enum */
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_7 0x7 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x8 /* enum */
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 12
-#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 12
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_8 0x8 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_9 0x9 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_10 0xa /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_11 0xb /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_12 0xc /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_13 0xd /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_14 0xe /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_15 0xf /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_ALL 0x10 /* enum */
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_LBN 13
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_AUTOCAL_WIDTH 1
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_LBN 14
+#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_RESERVED_WIDTH 10
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_LBN 24
#define MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_PARAM_CURRENT_WIDTH 8
+/* MC_CMD_PCIE_TUNE_RXEQ_SET_IN msgrequest */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMIN 8
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LENMAX 252
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_LEN(num) (4+4*(num))
+/* Requested operation */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_OFST 0
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_OP_LEN 1
+/* Align the arguments to 32 bits */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_OFST 1
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PCIE_TUNE_RSVD_LEN 3
+/* RXEQ Parameter */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_OFST 4
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LEN 4
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MINNUM 1
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_MAXNUM 62
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_LBN 0
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_WIDTH 8
+/* Enum values, see field(s): */
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_ID */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN 8
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_WIDTH 5
+/* Enum values, see field(s): */
+/* MC_CMD_PCIE_TUNE_RXEQ_GET_OUT/PARAM_LANE */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN 13
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_WIDTH 1
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_LBN 14
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED_WIDTH 2
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN 16
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_WIDTH 8
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_LBN 24
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_IN_RESERVED2_WIDTH 8
+
+/* MC_CMD_PCIE_TUNE_RXEQ_SET_OUT msgresponse */
+#define MC_CMD_PCIE_TUNE_RXEQ_SET_OUT_LEN 0
+
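Each element of the PARAM array packs the parameter ID, lane, auto-calibration flag and initial value into one 32-bit word using the LBN defines above. Purely as an illustration (the values are arbitrary and the function is hypothetical):

static u32 efx_example_pack_rxeq_param(void)
{
	u32 param = 0;

	/* Auto-calibrate the CTLE EQ capacitor on lane 2, seeded with an
	 * initial value of 9.
	 */
	param |= MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_CTLE_EQC <<
		 MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_ID_LBN;
	param |= MC_CMD_PCIE_TUNE_RXEQ_GET_OUT_LANE_2 <<
		 MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_LANE_LBN;
	param |= 1 << MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_AUTOCAL_LBN;
	param |= 9 << MC_CMD_PCIE_TUNE_RXEQ_SET_IN_PARAM_INITIAL_LBN;
	return param;
}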
/* MC_CMD_PCIE_TUNE_TXEQ_GET_IN msgrequest */
#define MC_CMD_PCIE_TUNE_TXEQ_GET_IN_LEN 4
/* Requested operation */
@@ -9176,6 +10064,7 @@
/***********************************/
/* MC_CMD_LICENSING
* Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
+ * - not used for V3 licensing
*/
#define MC_CMD_LICENSING 0xf3
@@ -9220,6 +10109,93 @@
/***********************************/
+/* MC_CMD_LICENSING_V3
+ * Operations on the NVRAM_PARTITION_TYPE_LICENSE application license partition
+ * - V3 licensing (Medford)
+ */
+#define MC_CMD_LICENSING_V3 0xd0
+
+#define MC_CMD_0xd0_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSING_V3_IN msgrequest */
+#define MC_CMD_LICENSING_V3_IN_LEN 4
+/* identifies the type of operation requested */
+#define MC_CMD_LICENSING_V3_IN_OP_OFST 0
+/* enum: re-read and apply licenses after a license key partition update; note
+ * that this operation returns a zero-length response
+ */
+#define MC_CMD_LICENSING_V3_IN_OP_UPDATE_LICENSE 0x0
+/* enum: report counts of installed licenses */
+#define MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE 0x1
+
+/* MC_CMD_LICENSING_V3_OUT msgresponse */
+#define MC_CMD_LICENSING_V3_OUT_LEN 88
+/* count of keys which are valid */
+#define MC_CMD_LICENSING_V3_OUT_VALID_KEYS_OFST 0
+/* sum of UNVERIFIABLE_KEYS + WRONG_NODE_KEYS (for compatibility with
+ * MC_CMD_FC_OP_LICENSE)
+ */
+#define MC_CMD_LICENSING_V3_OUT_INVALID_KEYS_OFST 4
+/* count of keys which are invalid due to being unverifiable */
+#define MC_CMD_LICENSING_V3_OUT_UNVERIFIABLE_KEYS_OFST 8
+/* count of keys which are invalid due to being for the wrong node */
+#define MC_CMD_LICENSING_V3_OUT_WRONG_NODE_KEYS_OFST 12
+/* licensing state (for diagnostics; the exact meaning of the bits in this
+ * field is private to the firmware)
+ */
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_STATE_OFST 16
+/* licensing subsystem self-test report (for manftest) */
+#define MC_CMD_LICENSING_V3_OUT_LICENSING_SELF_TEST_OFST 20
+/* enum: licensing subsystem self-test failed */
+#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_FAIL 0x0
+/* enum: licensing subsystem self-test passed */
+#define MC_CMD_LICENSING_V3_OUT_SELF_TEST_PASS 0x1
+/* bitmask of licensed applications */
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_OFST 24
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LEN 8
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_LO_OFST 24
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_APPS_HI_OFST 28
+/* reserved for future use */
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_0_OFST 32
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_0_LEN 24
+/* bitmask of licensed features */
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_OFST 56
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LEN 8
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_LO_OFST 56
+#define MC_CMD_LICENSING_V3_OUT_LICENSED_FEATURES_HI_OFST 60
+/* reserved for future use */
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_1_OFST 64
+#define MC_CMD_LICENSING_V3_OUT_RESERVED_1_LEN 24
+
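A report request and parse could look roughly like this (a sketch using the driver's MCDI helpers; efx_example_report_licenses() is a hypothetical name):

static int efx_example_report_licenses(struct efx_nic *efx, u64 *apps)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_LICENSING_V3_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_LICENSING_V3_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, LICENSING_V3_IN_OP,
		       MC_CMD_LICENSING_V3_IN_OP_REPORT_LICENSE);
	rc = efx_mcdi_rpc(efx, MC_CMD_LICENSING_V3, inbuf, sizeof(inbuf),
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_LICENSING_V3_OUT_LEN)
		return -EIO;
	/* The 64-bit application bitmask is read via its LO/HI dword pair */
	*apps = MCDI_QWORD(outbuf, LICENSING_V3_OUT_LICENSED_APPS);
	return 0;
}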
+
+/***********************************/
+/* MC_CMD_LICENSING_GET_ID_V3
+ * Get ID and type from the NVRAM_PARTITION_TYPE_LICENSE application license
+ * partition - V3 licensing (Medford)
+ */
+#define MC_CMD_LICENSING_GET_ID_V3 0xd1
+
+#define MC_CMD_0xd1_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSING_GET_ID_V3_IN msgrequest */
+#define MC_CMD_LICENSING_GET_ID_V3_IN_LEN 0
+
+/* MC_CMD_LICENSING_GET_ID_V3_OUT msgresponse */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN 8
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX 252
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LEN(num) (8+1*(num))
+/* type of license (e.g. 3) */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_TYPE_OFST 0
+/* length of the license ID (in bytes) */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH_OFST 4
+/* the unique license ID of the adapter */
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_OFST 8
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_LEN 1
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MINNUM 0
+#define MC_CMD_LICENSING_GET_ID_V3_OUT_LICENSE_ID_MAXNUM 244
+
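Because the response is variable-length, a caller should validate the declared ID length against the bytes actually returned before copying. A hedged sketch (hypothetical function name, sfc MCDI helpers assumed):

static int efx_example_license_id(struct efx_nic *efx, u8 *buf, size_t len)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_LICENSING_GET_ID_V3_OUT_LENMAX);
	size_t outlen;
	u32 id_len;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_LICENSING_GET_ID_V3, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_LICENSING_GET_ID_V3_OUT_LENMIN)
		return -EIO;
	id_len = MCDI_DWORD(outbuf, LICENSING_GET_ID_V3_OUT_LICENSE_ID_LENGTH);
	/* check the declared ID length fits in what was actually returned */
	if (outlen < MC_CMD_LICENSING_GET_ID_V3_OUT_LEN(id_len))
		return -EIO;
	memcpy(buf, MCDI_PTR(outbuf, LICENSING_GET_ID_V3_OUT_LICENSE_ID),
	       min_t(size_t, id_len, len));
	return 0;
}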
+
+/***********************************/
/* MC_CMD_MC2MC_PROXY
* Execute an arbitrary MCDI command on the slave MC of a dual-core device.
* This will fail on a single-core system.
@@ -9239,7 +10215,7 @@
/* MC_CMD_GET_LICENSED_APP_STATE
* Query the state of an individual licensed application. (Note that the actual
* state may be invalidated by the MC_CMD_LICENSING OP_UPDATE_LICENSE operation
- * or a reboot of the MC.)
+ * or a reboot of the MC.) Not used for V3 licensing
*/
#define MC_CMD_GET_LICENSED_APP_STATE 0xf5
@@ -9261,8 +10237,68 @@
/***********************************/
+/* MC_CMD_GET_LICENSED_V3_APP_STATE
+ * Query the state of an individual licensed application. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
+ * operation or a reboot of the MC.) Used for V3 licensing (Medford)
+ */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE 0xd2
+
+#define MC_CMD_0xd2_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LICENSED_V3_APP_STATE_IN msgrequest */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN 8
+/* application ID to query (LICENSED_V3_APPS_xxx) expressed as a single bit
+ * mask
+ */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_OFST 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LEN 8
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_IN_APP_ID_HI_OFST 4
+
+/* MC_CMD_GET_LICENSED_V3_APP_STATE_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN 4
+/* state of this application */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_STATE_OFST 0
+/* enum: no (or invalid) license is present for the application */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_NOT_LICENSED 0x0
+/* enum: a valid license is present for the application */
+#define MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LICENSED 0x1
+
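Since APP_ID is a 64-bit single-bit mask, a query reduces to one MCDI_SET_QWORD() and a dword read. A minimal sketch, with a hypothetical function name:

static bool efx_example_app_licensed(struct efx_nic *efx, u64 app_id)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_LICENSED_V3_APP_STATE_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN);
	size_t outlen;
	int rc;

	/* app_id must be a single set bit (LICENSED_V3_APPS_xxx) */
	MCDI_SET_QWORD(inbuf, GET_LICENSED_V3_APP_STATE_IN_APP_ID, app_id);
	rc = efx_mcdi_rpc(efx, MC_CMD_GET_LICENSED_V3_APP_STATE,
			  inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
			  &outlen);
	if (rc || outlen < MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LEN)
		return false;
	return MCDI_DWORD(outbuf, GET_LICENSED_V3_APP_STATE_OUT_STATE) ==
	       MC_CMD_GET_LICENSED_V3_APP_STATE_OUT_LICENSED;
}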
+
+/***********************************/
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES
+ * Query the state of one or more licensed features. (Note that the actual
+ * state may be invalidated by the MC_CMD_LICENSING_V3 OP_UPDATE_LICENSE
+ * operation or a reboot of the MC.) Used for V3 licensing (Medford)
+ */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES 0xd3
+
+#define MC_CMD_0xd3_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN msgrequest */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_LEN 8
+/* features to query (LICENSED_V3_FEATURES_xxx) expressed as a mask with one or
+ * more bits set
+ */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LEN 8
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_IN_FEATURES_HI_OFST 4
+
+/* MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT msgresponse */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_LEN 8
+/* states of these features - bit set for licensed, clear for not licensed */
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LEN 8
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_LO_OFST 0
+#define MC_CMD_GET_LICENSED_V3_FEATURE_STATES_OUT_STATES_HI_OFST 4
+
+
+/***********************************/
/* MC_CMD_LICENSED_APP_OP
- * Perform an action for an individual licensed application.
+ * Perform an action for an individual licensed application - not used for V3
+ * licensing.
*/
#define MC_CMD_LICENSED_APP_OP 0xf6
@@ -9328,6 +10364,67 @@
/***********************************/
+/* MC_CMD_LICENSED_V3_VALIDATE_APP
+ * Perform validation for an individual licensed application - V3 licensing
+ * (Medford)
+ */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP 0xd4
+
+#define MC_CMD_0xd4_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSED_V3_VALIDATE_APP_IN msgrequest */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_LEN 72
+/* application ID expressed as a single bit mask */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_OFST 0
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LEN 8
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_LO_OFST 0
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_APP_ID_HI_OFST 4
+/* challenge for validation */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_OFST 8
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_LEN 64
+
+/* MC_CMD_LICENSED_V3_VALIDATE_APP_OUT msgresponse */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_LEN 72
+/* application expiry time */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_TIME_OFST 0
+/* application expiry units */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNITS_OFST 4
+/* enum: expiry units are accounting units */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_ACC 0x0
+/* enum: expiry units are calendar days */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_EXPIRY_UNIT_DAYS 0x1
+/* validation response to challenge */
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_OFST 8
+#define MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN 64
+
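The challenge and response are fixed 64-byte blobs copied straight through the message buffers; a hedged sketch of the MCDI side (hypothetical name, sfc helpers assumed):

static int efx_example_validate_app(struct efx_nic *efx, u64 app_id,
				    const u8 *challenge, u8 *response)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_LICENSED_V3_VALIDATE_APP_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_QWORD(inbuf, LICENSED_V3_VALIDATE_APP_IN_APP_ID, app_id);
	memcpy(MCDI_PTR(inbuf, LICENSED_V3_VALIDATE_APP_IN_CHALLENGE),
	       challenge, MC_CMD_LICENSED_V3_VALIDATE_APP_IN_CHALLENGE_LEN);
	rc = efx_mcdi_rpc(efx, MC_CMD_LICENSED_V3_VALIDATE_APP,
			  inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
			  &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_LEN)
		return -EIO;
	memcpy(response,
	       MCDI_PTR(outbuf, LICENSED_V3_VALIDATE_APP_OUT_RESPONSE),
	       MC_CMD_LICENSED_V3_VALIDATE_APP_OUT_RESPONSE_LEN);
	return 0;
}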
+
+/***********************************/
+/* MC_CMD_LICENSED_V3_MASK_FEATURES
+ * Mask features - V3 licensing (Medford)
+ */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES 0xd5
+
+#define MC_CMD_0xd5_PRIVILEGE_CTG SRIOV_CTG_GENERAL
+
+/* MC_CMD_LICENSED_V3_MASK_FEATURES_IN msgrequest */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_LEN 12
+/* mask to be applied to features to be changed */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_OFST 0
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LEN 8
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_LO_OFST 0
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_MASK_HI_OFST 4
+/* whether to turn on or turn off the masked features */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_FLAG_OFST 8
+/* enum: turn the features off */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_OFF 0x0
+/* enum: turn the features back on */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_IN_ON 0x1
+
+/* MC_CMD_LICENSED_V3_MASK_FEATURES_OUT msgresponse */
+#define MC_CMD_LICENSED_V3_MASK_FEATURES_OUT_LEN 0
+
+
+/***********************************/
/* MC_CMD_SET_PORT_SNIFF_CONFIG
* Configure RX port sniffing for the physical port associated with the calling
* function. Only a privileged function may change the port sniffing
@@ -9696,12 +10793,27 @@
#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD 0x4 /* enum */
#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP 0x8 /* enum */
#define MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS 0x10 /* enum */
-#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20 /* enum */
+/* enum: Deprecated. Equivalent to MAC_SPOOFING_TX combined with CHANGE_MAC. */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING 0x20
#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST 0x40 /* enum */
#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST 0x80 /* enum */
#define MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST 0x100 /* enum */
#define MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST 0x200 /* enum */
#define MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS 0x400 /* enum */
+/* enum: Allows the TX packets' source MAC address to be set to any arbitrary
+ * MAC address.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING_TX 0x800
+/* enum: Privilege that allows a Function to change the MAC address configured
+ * in its associated vAdapter/vPort.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_CHANGE_MAC 0x1000
+/* enum: Privilege that allows a Function to install filters that specify VLANs
+ * that are not in the permit list for the associated vPort. This privilege is
+ * primarily to support ESX where vPorts are created that restrict traffic to
+ * only a set of permitted VLANs. See the vPort flag FLAG_VLAN_RESTRICT.
+ */
+#define MC_CMD_PRIVILEGE_MASK_IN_GRP_UNRESTRICTED_VLAN 0x2000
/* enum: Set this bit to indicate that a new privilege mask is to be set,
* otherwise the command will only read the existing mask.
*/
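The mask is a plain bitwise OR of these group values, so a check for MAC-change rights that honours the deprecated MAC_SPOOFING alias reduces to the following (illustrative only; the function name is hypothetical):

static bool efx_example_may_change_mac(u32 priv_mask)
{
	/* The deprecated MAC_SPOOFING bit implies CHANGE_MAC */
	return priv_mask & (MC_CMD_PRIVILEGE_MASK_IN_GRP_CHANGE_MAC |
			    MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING);
}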
@@ -9951,7 +11063,7 @@
/* Sector type */
#define MC_CMD_XPM_WRITE_SECTOR_IN_TYPE_OFST 4
/* Enum values, see field(s): */
-/* MC_CMD_XPM_READ_SECTOR_OUT/TYPE */
+/* MC_CMD_XPM_READ_SECTOR/MC_CMD_XPM_READ_SECTOR_OUT/TYPE */
/* Sector size */
#define MC_CMD_XPM_WRITE_SECTOR_IN_SIZE_OFST 8
/* Sector data */
@@ -10067,4 +11179,123 @@
#define MC_CMD_XPM_WRITE_TEST_OUT_LEN 0
+/***********************************/
+/* MC_CMD_EXEC_SIGNED
+ * Check the CMAC of the contents of IMEM and DMEM against the value supplied
+ * and if correct begin execution from the start of IMEM. The caller supplies a
+ * key ID, the length of IMEM and DMEM to validate and the expected CMAC. CMAC
+ * computation runs from the start of IMEM, and from the start of DMEM + 16k,
+ * to match flash booting. The command will respond with EINVAL if the CMAC
+ * does not match; otherwise it will respond with success before it jumps to
+ * IMEM.
+ */
+#define MC_CMD_EXEC_SIGNED 0x10c
+
+#define MC_CMD_0x10c_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_EXEC_SIGNED_IN msgrequest */
+#define MC_CMD_EXEC_SIGNED_IN_LEN 28
+/* the length of code to include in the CMAC */
+#define MC_CMD_EXEC_SIGNED_IN_CODELEN_OFST 0
+/* the length of data to include in the CMAC */
+#define MC_CMD_EXEC_SIGNED_IN_DATALEN_OFST 4
+/* the XPM sector containing the key to use */
+#define MC_CMD_EXEC_SIGNED_IN_KEYSECTOR_OFST 8
+/* the expected CMAC value */
+#define MC_CMD_EXEC_SIGNED_IN_CMAC_OFST 12
+#define MC_CMD_EXEC_SIGNED_IN_CMAC_LEN 16
+
+/* MC_CMD_EXEC_SIGNED_OUT msgresponse */
+#define MC_CMD_EXEC_SIGNED_OUT_LEN 0
+
+
+/***********************************/
+/* MC_CMD_PREPARE_SIGNED
+ * Prepare to upload a signed image. This will scrub the specified length of
+ * the data region, which must be at least as large as the DATALEN supplied to
+ * MC_CMD_EXEC_SIGNED.
+ */
+#define MC_CMD_PREPARE_SIGNED 0x10d
+
+#define MC_CMD_0x10d_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_PREPARE_SIGNED_IN msgrequest */
+#define MC_CMD_PREPARE_SIGNED_IN_LEN 4
+/* the length of data area to clear */
+#define MC_CMD_PREPARE_SIGNED_IN_DATALEN_OFST 0
+
+/* MC_CMD_PREPARE_SIGNED_OUT msgresponse */
+#define MC_CMD_PREPARE_SIGNED_OUT_LEN 0
+
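The two commands are used in sequence: PREPARE_SIGNED scrubs the data region, the caller uploads the signed image, then EXEC_SIGNED verifies the CMAC and jumps. A minimal sketch of the MCDI side under the usual assumptions (sfc helpers, hypothetical function name):

static int efx_example_exec_signed(struct efx_nic *efx, u32 codelen,
				   u32 datalen, u32 keysector,
				   const u8 *cmac)
{
	MCDI_DECLARE_BUF(prep, MC_CMD_PREPARE_SIGNED_IN_LEN);
	MCDI_DECLARE_BUF(exec, MC_CMD_EXEC_SIGNED_IN_LEN);
	int rc;

	/* Scrub at least DATALEN bytes of the data region first */
	MCDI_SET_DWORD(prep, PREPARE_SIGNED_IN_DATALEN, datalen);
	rc = efx_mcdi_rpc(efx, MC_CMD_PREPARE_SIGNED, prep, sizeof(prep),
			  NULL, 0, NULL);
	if (rc)
		return rc;

	/* ...the signed image is uploaded to IMEM/DMEM here... */

	/* Ask the MC to verify the CMAC and jump to the start of IMEM */
	MCDI_SET_DWORD(exec, EXEC_SIGNED_IN_CODELEN, codelen);
	MCDI_SET_DWORD(exec, EXEC_SIGNED_IN_DATALEN, datalen);
	MCDI_SET_DWORD(exec, EXEC_SIGNED_IN_KEYSECTOR, keysector);
	memcpy(MCDI_PTR(exec, EXEC_SIGNED_IN_CMAC), cmac,
	       MC_CMD_EXEC_SIGNED_IN_CMAC_LEN);
	return efx_mcdi_rpc(efx, MC_CMD_EXEC_SIGNED, exec, sizeof(exec),
			    NULL, 0, NULL);
}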
+
+/***********************************/
+/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS
+ * Configure UDP ports for tunnel encapsulation hardware acceleration. The
+ * parser-dispatcher will attempt to parse traffic on these ports as tunnel
+ * encapsulation PDUs and filter them using the tunnel encapsulation filter
+ * chain rather than the standard filter chain. Note that this command can
+ * cause all functions to see a reset. (Available on Medford only.)
+ */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS 0x117
+
+#define MC_CMD_0x117_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN msgrequest */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMIN 4
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LENMAX 68
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_LEN(num) (4+4*(num))
+/* Flags */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_OFST 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_FLAGS_LEN 2
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_LBN 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_UNLOADING_WIDTH 1
+/* The number of entries in the ENTRIES array */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_OFST 2
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_NUM_ENTRIES_LEN 2
+/* Entries defining the UDP port to protocol mapping, each laid out as a
+ * TUNNEL_ENCAP_UDP_PORT_ENTRY
+ */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_OFST 4
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_LEN 4
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MINNUM 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM 16
+
+/* MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT msgresponse */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN 2
+/* Flags */
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_OFST 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS_LEN 2
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN 0
+#define MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_WIDTH 1
+
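Since this command may reset every function, a caller needs to check the RESETTING flag in the response and be prepared to survive an MC reboot. A hedged sketch of that handling (hypothetical name; the request buffer is assumed to have been built elsewhere):

static int efx_example_set_udp_tnl_ports(struct efx_nic *efx,
					 const efx_dword_t *inbuf,
					 size_t inlen, bool *resetting)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN);
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS,
			  inbuf, inlen, outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_LEN)
		return -EIO;
	/* If set, all functions will shortly see an MC reset */
	*resetting =
		MCDI_WORD(outbuf, SET_TUNNEL_ENCAP_UDP_PORTS_OUT_FLAGS) &
		(1 << MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_OUT_RESETTING_LBN);
	return 0;
}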
+
+/***********************************/
+/* MC_CMD_RX_BALANCING
+ * Configure a port upconverter to distribute packets across both RX engines.
+ * Packets are distributed according to a table of destination vFIFOs, indexed
+ * by a hash of the IPv4 source and destination addresses and the VLAN
+ * priority.
+ */
+#define MC_CMD_RX_BALANCING 0x118
+
+#define MC_CMD_0x118_PRIVILEGE_CTG SRIOV_CTG_ADMIN
+
+/* MC_CMD_RX_BALANCING_IN msgrequest */
+#define MC_CMD_RX_BALANCING_IN_LEN 4
+/* The RX port whose upconverter table will be modified */
+#define MC_CMD_RX_BALANCING_IN_PORT_OFST 0
+#define MC_CMD_RX_BALANCING_IN_PORT_LEN 1
+/* The VLAN priority associated with the table index and vFIFO */
+#define MC_CMD_RX_BALANCING_IN_PRIORITY_OFST 1
+#define MC_CMD_RX_BALANCING_IN_PRIORITY_LEN 1
+/* The resulting bit of SRC^DST for indexing the table */
+#define MC_CMD_RX_BALANCING_IN_SRC_DST_OFST 2
+#define MC_CMD_RX_BALANCING_IN_SRC_DST_LEN 1
+/* The RX engine to which the vFIFO in the table entry will point */
+#define MC_CMD_RX_BALANCING_IN_ENG_OFST 3
+#define MC_CMD_RX_BALANCING_IN_ENG_LEN 1
+
+/* MC_CMD_RX_BALANCING_OUT msgresponse */
+#define MC_CMD_RX_BALANCING_OUT_LEN 0
+
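All four input fields are single bytes packed into the one input dword, so they can be written directly through MCDI_PTR(). A sketch only (hypothetical name, sfc helpers assumed):

static int efx_example_rx_balance(struct efx_nic *efx, u8 port, u8 vlan_prio,
				  u8 src_dst, u8 engine)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_RX_BALANCING_IN_LEN);

	/* each field is one byte within the first dword */
	*MCDI_PTR(inbuf, RX_BALANCING_IN_PORT) = port;
	*MCDI_PTR(inbuf, RX_BALANCING_IN_PRIORITY) = vlan_prio;
	*MCDI_PTR(inbuf, RX_BALANCING_IN_SRC_DST) = src_dst;
	*MCDI_PTR(inbuf, RX_BALANCING_IN_ENG) = engine;
	return efx_mcdi_rpc(efx, MC_CMD_RX_BALANCING, inbuf, sizeof(inbuf),
			    NULL, 0, NULL);
}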
+
#endif /* MCDI_PCOL_H */
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 7f295c4d7b80..2a9228a6e4a0 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -189,11 +189,12 @@ static u32 mcdi_to_ethtool_cap(u32 media, u32 cap)
case MC_CMD_MEDIA_XFP:
case MC_CMD_MEDIA_SFP_PLUS:
- result |= SUPPORTED_FIBRE;
- break;
-
case MC_CMD_MEDIA_QSFP_PLUS:
result |= SUPPORTED_FIBRE;
+ if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+ result |= SUPPORTED_1000baseT_Full;
+ if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
+ result |= SUPPORTED_10000baseT_Full;
if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
result |= SUPPORTED_40000baseCR4_Full;
break;
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 38c422321cda..9ff062a36ea8 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -403,6 +403,8 @@ enum efx_sync_events_state {
* @event_test_cpu: Last CPU to handle interrupt or test event for this channel
* @irq_count: Number of IRQs since last adaptive moderation decision
* @irq_mod_score: IRQ moderation score
+ * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
+ * indexed by filter ID
* @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
* @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
* @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
@@ -446,6 +448,8 @@ struct efx_channel {
unsigned int irq_mod_score;
#ifdef CONFIG_RFS_ACCEL
unsigned int rfs_filters_added;
+#define RPS_FLOW_ID_INVALID 0xFFFFFFFF
+ u32 *rps_flow_id;
#endif
unsigned n_rx_tobe_disc;
@@ -864,6 +868,7 @@ struct vfdi_status;
* be held to modify it.
* @port_initialized: Port initialized?
* @net_dev: Operating system network device. Consider holding the rtnl lock
+ * @fixed_features: Features which cannot be turned off
* @stats_buffer: DMA buffer for statistics
* @phy_type: PHY type
* @phy_op: PHY interface
@@ -889,9 +894,9 @@ struct vfdi_status;
* @filter_sem: Filter table rw_semaphore, for freeing the table
* @filter_lock: Filter table lock, for mere content changes
* @filter_state: Architecture-dependent filter table state
- * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
- * indexed by filter ID
- * @rps_expire_index: Next index to check for expiry in @rps_flow_id
+ * @rps_expire_channel: Next channel to check for expiry
+ * @rps_expire_index: Next index to check for expiry in
+ * @rps_expire_channel's @rps_flow_id
* @active_queues: Count of RX and TX queues that haven't been flushed and drained.
* @rxq_flush_pending: Count of number of receive queues that need to be flushed.
* Decremented when the efx_flush_rx_queue() is called.
@@ -912,7 +917,6 @@ struct vfdi_status;
* @stats_lock: Statistics update lock. Must be held when calling
* efx_nic_type::{update,start,stop}_stats.
* @n_rx_noskb_drops: Count of RX packets dropped due to failure to allocate an skb
- * @mc_promisc: Whether in multicast promiscuous mode when last changed
*
* This is stored in the private area of the &struct net_device.
*/
@@ -1004,6 +1008,8 @@ struct efx_nic {
bool port_initialized;
struct net_device *net_dev;
+ netdev_features_t fixed_features;
+
struct efx_buffer stats_buffer;
u64 rx_nodesc_drops_total;
u64 rx_nodesc_drops_while_down;
@@ -1035,7 +1041,7 @@ struct efx_nic {
spinlock_t filter_lock;
void *filter_state;
#ifdef CONFIG_RFS_ACCEL
- u32 *rps_flow_id;
+ unsigned int rps_expire_channel;
unsigned int rps_expire_index;
#endif
@@ -1061,7 +1067,6 @@ struct efx_nic {
int last_irq_cpu;
spinlock_t stats_lock;
atomic_t n_rx_noskb_drops;
- bool mc_promisc;
};
static inline int efx_dev_registered(struct efx_nic *efx)
@@ -1329,6 +1334,8 @@ struct efx_nic_type {
int (*ptp_set_ts_config)(struct efx_nic *efx,
struct hwtstamp_config *init);
int (*sriov_configure)(struct efx_nic *efx, int num_vfs);
+ int (*vlan_rx_add_vid)(struct efx_nic *efx, __be16 proto, u16 vid);
+ int (*vlan_rx_kill_vid)(struct efx_nic *efx, __be16 proto, u16 vid);
int (*sriov_init)(struct efx_nic *efx);
void (*sriov_fini)(struct efx_nic *efx);
bool (*sriov_wanted)(struct efx_nic *efx);
@@ -1517,4 +1524,16 @@ static inline void efx_xmit_hwtstamp_pending(struct sk_buff *skb)
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
}
+/* Get all supported features.
+ * If a feature is not fixed, it is present in hw_features.
+ * If a feature is fixed, it is not present in hw_features, but is
+ * always present in features.
+ */
+static inline netdev_features_t efx_supported_features(const struct efx_nic *efx)
+{
+ const struct net_device *net_dev = efx->net_dev;
+
+ return net_dev->features | net_dev->hw_features;
+}
+
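For instance, a NIC type that cannot disable VLAN filtering might arrange the flags at probe time along these lines (illustrative only, not code from this patch):

	/* RX VLAN filtering cannot be turned off on this hardware */
	efx->fixed_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	/* fixed features appear in features but not in hw_features */
	net_dev->features |= efx->fixed_features;
	net_dev->hw_features &= ~efx->fixed_features;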
#endif /* EFX_NET_DRIVER_H */
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 0b536e27d3b2..96944c3c9d14 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -519,6 +519,9 @@ enum {
#ifdef CONFIG_SFC_SRIOV
* @vf: Pointer to VF data structure
#endif
+ * @vport_mac: The MAC address on the vport; only used by PFs (zero for VFs)
+ * @vlan_list: List of VLANs added over the interface. Serialised by vlan_lock.
+ * @vlan_lock: Lock to serialise access to vlan_list.
*/
struct efx_ef10_nic_data {
struct efx_buffer mcdi_buf;
@@ -550,6 +553,8 @@ struct efx_ef10_nic_data {
struct ef10_vf *vf;
#endif
u8 vport_mac[ETH_ALEN];
+ struct list_head vlan_list;
+ struct mutex vlan_lock;
};
int efx_init_sriov(void);
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 8956995b2fe7..02b0b5272c14 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -842,33 +842,18 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_channel *channel;
struct efx_filter_spec spec;
- const __be16 *ports;
- __be16 ether_type;
- int nhoff;
+ struct flow_keys fk;
int rc;
- /* The core RPS/RFS code has already parsed and validated
- * VLAN, IP and transport headers. We assume they are in the
- * header area.
- */
-
- if (skb->protocol == htons(ETH_P_8021Q)) {
- const struct vlan_hdr *vh =
- (const struct vlan_hdr *)skb->data;
+ if (flow_id == RPS_FLOW_ID_INVALID)
+ return -EINVAL;
- /* We can't filter on the IP 5-tuple and the vlan
- * together, so just strip the vlan header and filter
- * on the IP part.
- */
- EFX_BUG_ON_PARANOID(skb_headlen(skb) < sizeof(*vh));
- ether_type = vh->h_vlan_encapsulated_proto;
- nhoff = sizeof(struct vlan_hdr);
- } else {
- ether_type = skb->protocol;
- nhoff = 0;
- }
+ if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
+ return -EPROTONOSUPPORT;
- if (ether_type != htons(ETH_P_IP) && ether_type != htons(ETH_P_IPV6))
+ if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6))
+ return -EPROTONOSUPPORT;
+ if (fk.control.flags & FLOW_DIS_IS_FRAGMENT)
return -EPROTONOSUPPORT;
efx_filter_init_rx(&spec, EFX_FILTER_PRI_HINT,
@@ -878,56 +863,41 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
- spec.ether_type = ether_type;
-
- if (ether_type == htons(ETH_P_IP)) {
- const struct iphdr *ip =
- (const struct iphdr *)(skb->data + nhoff);
-
- EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + sizeof(*ip));
- if (ip_is_fragment(ip))
- return -EPROTONOSUPPORT;
- spec.ip_proto = ip->protocol;
- spec.rem_host[0] = ip->saddr;
- spec.loc_host[0] = ip->daddr;
- EFX_BUG_ON_PARANOID(skb_headlen(skb) < nhoff + 4 * ip->ihl + 4);
- ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);
+ spec.ether_type = fk.basic.n_proto;
+ spec.ip_proto = fk.basic.ip_proto;
+
+ if (fk.basic.n_proto == htons(ETH_P_IP)) {
+ spec.rem_host[0] = fk.addrs.v4addrs.src;
+ spec.loc_host[0] = fk.addrs.v4addrs.dst;
} else {
- const struct ipv6hdr *ip6 =
- (const struct ipv6hdr *)(skb->data + nhoff);
-
- EFX_BUG_ON_PARANOID(skb_headlen(skb) <
- nhoff + sizeof(*ip6) + 4);
- spec.ip_proto = ip6->nexthdr;
- memcpy(spec.rem_host, &ip6->saddr, sizeof(ip6->saddr));
- memcpy(spec.loc_host, &ip6->daddr, sizeof(ip6->daddr));
- ports = (const __be16 *)(ip6 + 1);
+ memcpy(spec.rem_host, &fk.addrs.v6addrs.src, sizeof(struct in6_addr));
+ memcpy(spec.loc_host, &fk.addrs.v6addrs.dst, sizeof(struct in6_addr));
}
- spec.rem_port = ports[0];
- spec.loc_port = ports[1];
+ spec.rem_port = fk.ports.src;
+ spec.loc_port = fk.ports.dst;
rc = efx->type->filter_rfs_insert(efx, &spec);
if (rc < 0)
return rc;
/* Remember this so we can check whether to expire the filter later */
- efx->rps_flow_id[rc] = flow_id;
- channel = efx_get_channel(efx, skb_get_rx_queue(skb));
+ channel = efx_get_channel(efx, rxq_index);
+ channel->rps_flow_id[rc] = flow_id;
++channel->rfs_filters_added;
- if (ether_type == htons(ETH_P_IP))
+ if (spec.ether_type == htons(ETH_P_IP))
netif_info(efx, rx_status, efx->net_dev,
"steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n",
(spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
- spec.rem_host, ntohs(ports[0]), spec.loc_host,
- ntohs(ports[1]), rxq_index, flow_id, rc);
+ spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
+ ntohs(spec.loc_port), rxq_index, flow_id, rc);
else
netif_info(efx, rx_status, efx->net_dev,
"steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n",
(spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
- spec.rem_host, ntohs(ports[0]), spec.loc_host,
- ntohs(ports[1]), rxq_index, flow_id, rc);
+ spec.rem_host, ntohs(spec.rem_port), spec.loc_host,
+ ntohs(spec.loc_port), rxq_index, flow_id, rc);
return rc;
}
@@ -935,24 +905,34 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
bool __efx_filter_rfs_expire(struct efx_nic *efx, unsigned int quota)
{
bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
- unsigned int index, size;
+ unsigned int channel_idx, index, size;
u32 flow_id;
if (!spin_trylock_bh(&efx->filter_lock))
return false;
expire_one = efx->type->filter_rfs_expire_one;
+ channel_idx = efx->rps_expire_channel;
index = efx->rps_expire_index;
size = efx->type->max_rx_ip_filters;
while (quota--) {
- flow_id = efx->rps_flow_id[index];
- if (expire_one(efx, flow_id, index))
+ struct efx_channel *channel = efx_get_channel(efx, channel_idx);
+ flow_id = channel->rps_flow_id[index];
+
+ if (flow_id != RPS_FLOW_ID_INVALID &&
+ expire_one(efx, flow_id, index)) {
netif_info(efx, rx_status, efx->net_dev,
- "expired filter %d [flow %u]\n",
- index, flow_id);
- if (++index == size)
+ "expired filter %d [queue %u flow %u]\n",
+ index, channel_idx, flow_id);
+ channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
+ }
+ if (++index == size) {
+ if (++channel_idx == efx->n_channels)
+ channel_idx = 0;
index = 0;
+ }
}
+ efx->rps_expire_channel = channel_idx;
efx->rps_expire_index = index;
spin_unlock_bh(&efx->filter_lock);
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 5eac523b4b0c..aaa80f13859b 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -708,7 +708,7 @@ static int meth_tx(struct sk_buff *skb, struct net_device *dev)
mace->eth.dma_ctrl = priv->dma_ctrl;
meth_add_to_tx_ring(priv, skb);
- dev->trans_start = jiffies; /* save the timestamp */
+ netif_trans_update(dev); /* save the timestamp */
/* If TX ring is full, tell the upper layer to stop sending packets */
if (meth_tx_full(dev)) {
@@ -756,7 +756,7 @@ static void meth_tx_timeout(struct net_device *dev)
/* Enable interrupt */
spin_unlock_irqrestore(&priv->meth_lock, flags);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index fd812d2e5e1c..95001ee408ab 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -1575,7 +1575,7 @@ static void sis900_tx_timeout(struct net_device *net_dev)
spin_unlock_irqrestore(&sis_priv->lock, flags);
- net_dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(net_dev); /* prevent tx timeout */
/* load Transmit Descriptor Register */
sw32(txdp, sis_priv->tx_ring_dma);
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 443f1da9fc9e..7186b89269ad 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -889,7 +889,7 @@ static void epic_tx_timeout(struct net_device *dev)
ew32(COMMAND, TxQueued);
}
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
dev->stats.tx_errors++;
if (!ep->tx_full)
netif_wake_queue(dev);
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index a733868a43aa..cb49c9654f0a 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -499,7 +499,7 @@ static void smc911x_hardware_send_pkt(struct net_device *dev)
/* DMA complete IRQ will free buffer and set jiffies */
#else
SMC_PUSH_DATA(lp, buf, len);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
dev_kfree_skb_irq(skb);
#endif
if (!lp->tx_throttle) {
@@ -1189,7 +1189,7 @@ smc911x_tx_dma_irq(void *data)
DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, dev, "TX DMA irq handler\n");
BUG_ON(skb == NULL);
dma_unmap_single(NULL, tx_dmabuf, tx_dmalen, DMA_TO_DEVICE);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
dev_kfree_skb_irq(skb);
lp->current_tx_skb = NULL;
if (lp->pending_tx_skb != NULL)
@@ -1283,7 +1283,7 @@ static void smc911x_timeout(struct net_device *dev)
schedule_work(&lp->phy_configure);
/* We can accept TX packets again */
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c
index 664f596971b5..d496888b85d3 100644
--- a/drivers/net/ethernet/smsc/smc9194.c
+++ b/drivers/net/ethernet/smsc/smc9194.c
@@ -663,7 +663,7 @@ static void smc_hardware_send_packet( struct net_device * dev )
lp->saved_skb = NULL;
dev_kfree_skb_any (skb);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
/* we can send another packet */
netif_wake_queue(dev);
@@ -1104,7 +1104,7 @@ static void smc_timeout(struct net_device *dev)
/* "kick" the adaptor */
smc_reset( dev->base_addr );
smc_enable( dev->base_addr );
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
/* clear anything saved */
((struct smc_local *)netdev_priv(dev))->saved_skb = NULL;
netif_wake_queue(dev);
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 3449893aea8d..db3c696d7002 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -1172,7 +1172,7 @@ static void smc_hardware_send_packet(struct net_device * dev)
smc->saved_skb = NULL;
dev_kfree_skb_irq(skb);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
netif_start_queue(dev);
}
@@ -1187,7 +1187,7 @@ static void smc_tx_timeout(struct net_device *dev)
inw(ioaddr)&0xff, inw(ioaddr + 2));
dev->stats.tx_errors++;
smc_reset(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
smc->saved_skb = NULL;
netif_wake_queue(dev);
}
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index c5ed27c54724..503a3b6dce91 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -619,7 +619,7 @@ static void smc_hardware_send_pkt(unsigned long data)
SMC_SET_MMU_CMD(lp, MC_ENQUEUE);
smc_special_unlock(&lp->lock, flags);
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
dev->stats.tx_packets++;
dev->stats.tx_bytes += len;
@@ -1364,7 +1364,7 @@ static void smc_timeout(struct net_device *dev)
schedule_work(&lp->phy_configure);
/* We can accept TX packets again */
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -2195,6 +2195,12 @@ static void smc_release_datacs(struct platform_device *pdev, struct net_device *
}
}
+static const struct acpi_device_id smc91x_acpi_match[] = {
+ { "LNRO0003", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, smc91x_acpi_match);
+
#if IS_BUILTIN(CONFIG_OF)
static const struct of_device_id smc91x_match[] = {
{ .compatible = "smsc,lan91c94", },
@@ -2269,12 +2275,18 @@ static int smc_drv_probe(struct platform_device *pdev)
if (pd) {
memcpy(&lp->cfg, pd, sizeof(lp->cfg));
lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags);
+
+ if (!SMC_8BIT(lp) && !SMC_16BIT(lp)) {
+ dev_err(&pdev->dev,
+ "at least one of 8-bit or 16-bit access support is required.\n");
+ ret = -ENXIO;
+ goto out_free_netdev;
+ }
}
#if IS_BUILTIN(CONFIG_OF)
match = of_match_device(of_match_ptr(smc91x_match), &pdev->dev);
if (match) {
- struct device_node *np = pdev->dev.of_node;
u32 val;
/* Optional pwrdwn GPIO configured? */
@@ -2300,7 +2312,8 @@ static int smc_drv_probe(struct platform_device *pdev)
usleep_range(750, 1000);
/* Combination of IO widths supported, default to 16-bit */
- if (!of_property_read_u32(np, "reg-io-width", &val)) {
+ if (!device_property_read_u32(&pdev->dev, "reg-io-width",
+ &val)) {
if (val & 1)
lp->cfg.flags |= SMC91X_USE_8BIT;
if ((val == 0) || (val & 2))
@@ -2478,7 +2491,8 @@ static struct platform_driver smc_driver = {
.driver = {
.name = CARDNAME,
.pm = &smc_drv_pm_ops,
- .of_match_table = of_match_ptr(smc91x_match),
+ .of_match_table = of_match_ptr(smc91x_match),
+ .acpi_match_table = smc91x_acpi_match,
},
};
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 1a55c7976df0..ea8465467469 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -37,6 +37,27 @@
#include <linux/smc91x.h>
/*
+ * Any 16-bit access is performed with two 8-bit accesses if the hardware
+ * can't do it directly. Most registers are 16-bit so those are mandatory.
+ */
+#define SMC_outw_b(x, a, r) \
+ do { \
+ unsigned int __val16 = (x); \
+ unsigned int __reg = (r); \
+ SMC_outb(__val16, a, __reg); \
+ SMC_outb(__val16 >> 8, a, __reg + (1 << SMC_IO_SHIFT)); \
+ } while (0)
+
+#define SMC_inw_b(a, r) \
+ ({ \
+ unsigned int __val16; \
+ unsigned int __reg = r; \
+ __val16 = SMC_inb(a, __reg); \
+ __val16 |= SMC_inb(a, __reg + (1 << SMC_IO_SHIFT)) << 8; \
+ __val16; \
+ })
+
+/*
* Define your architecture specific bus configuration parameters here.
*/
@@ -55,10 +76,30 @@
#define SMC_IO_SHIFT (lp->io_shift)
#define SMC_inb(a, r) readb((a) + (r))
-#define SMC_inw(a, r) readw((a) + (r))
+#define SMC_inw(a, r) \
+ ({ \
+ unsigned int __smc_r = r; \
+ SMC_16BIT(lp) ? readw((a) + __smc_r) : \
+ SMC_8BIT(lp) ? SMC_inw_b(a, __smc_r) : \
+ ({ BUG(); 0; }); \
+ })
+
#define SMC_inl(a, r) readl((a) + (r))
#define SMC_outb(v, a, r) writeb(v, (a) + (r))
+#define SMC_outw(v, a, r) \
+ do { \
+ unsigned int __v = v, __smc_r = r; \
+ if (SMC_16BIT(lp)) \
+ __SMC_outw(__v, a, __smc_r); \
+ else if (SMC_8BIT(lp)) \
+ SMC_outw_b(__v, a, __smc_r); \
+ else \
+ BUG(); \
+ } while (0)
+
#define SMC_outl(v, a, r) writel(v, (a) + (r))
+#define SMC_insb(a, r, p, l) readsb((a) + (r), p, l)
+#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, l)
#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
@@ -66,7 +107,7 @@
#define SMC_IRQ_FLAGS (-1) /* from resource */
/* We actually can't write halfwords properly if not word aligned */
-static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
+static inline void __SMC_outw(u16 val, void __iomem *ioaddr, int reg)
{
if ((machine_is_mainstone() || machine_is_stargate2() ||
machine_is_pxa_idp()) && reg & 2) {
@@ -416,24 +457,8 @@ smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
#if ! SMC_CAN_USE_16BIT
-/*
- * Any 16-bit access is performed with two 8-bit accesses if the hardware
- * can't do it directly. Most registers are 16-bit so those are mandatory.
- */
-#define SMC_outw(x, ioaddr, reg) \
- do { \
- unsigned int __val16 = (x); \
- SMC_outb( __val16, ioaddr, reg ); \
- SMC_outb( __val16 >> 8, ioaddr, reg + (1 << SMC_IO_SHIFT));\
- } while (0)
-#define SMC_inw(ioaddr, reg) \
- ({ \
- unsigned int __val16; \
- __val16 = SMC_inb( ioaddr, reg ); \
- __val16 |= SMC_inb( ioaddr, reg + (1 << SMC_IO_SHIFT)) << 8; \
- __val16; \
- })
-
+#define SMC_outw(x, ioaddr, reg) SMC_outw_b(x, ioaddr, reg)
+#define SMC_inw(ioaddr, reg) SMC_inw_b(ioaddr, reg)
#define SMC_insw(a, r, p, l) BUG()
#define SMC_outsw(a, r, p, l) BUG()
@@ -445,7 +470,9 @@ smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
#endif
#if ! SMC_CAN_USE_8BIT
+#undef SMC_inb
#define SMC_inb(ioaddr, reg) ({ BUG(); 0; })
+#undef SMC_outb
#define SMC_outb(x, ioaddr, reg) BUG()
#define SMC_insb(a, r, p, l) BUG()
#define SMC_outsb(a, r, p, l) BUG()
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 8af25563f627..4f8910b7db2e 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -114,9 +114,7 @@ struct smsc911x_data {
/* spinlock to ensure register accesses are serialised */
spinlock_t dev_lock;
- struct phy_device *phy_dev;
struct mii_bus *mii_bus;
- int phy_irq[PHY_MAX_ADDR];
unsigned int using_extphy;
int last_duplex;
int last_carrier;
@@ -834,7 +832,7 @@ static int smsc911x_phy_reset(struct smsc911x_data *pdata)
static int smsc911x_phy_loopbacktest(struct net_device *dev)
{
struct smsc911x_data *pdata = netdev_priv(dev);
- struct phy_device *phy_dev = pdata->phy_dev;
+ struct phy_device *phy_dev = dev->phydev;
int result = -EIO;
unsigned int i, val;
unsigned long flags;
@@ -904,7 +902,8 @@ static int smsc911x_phy_loopbacktest(struct net_device *dev)
static void smsc911x_phy_update_flowcontrol(struct smsc911x_data *pdata)
{
- struct phy_device *phy_dev = pdata->phy_dev;
+ struct net_device *ndev = pdata->dev;
+ struct phy_device *phy_dev = ndev->phydev;
u32 afc = smsc911x_reg_read(pdata, AFC_CFG);
u32 flow;
unsigned long flags;
@@ -945,7 +944,7 @@ static void smsc911x_phy_update_flowcontrol(struct smsc911x_data *pdata)
static void smsc911x_phy_adjust_link(struct net_device *dev)
{
struct smsc911x_data *pdata = netdev_priv(dev);
- struct phy_device *phy_dev = pdata->phy_dev;
+ struct phy_device *phy_dev = dev->phydev;
unsigned long flags;
int carrier;
@@ -1038,7 +1037,6 @@ static int smsc911x_mii_probe(struct net_device *dev)
SUPPORTED_Asym_Pause);
phydev->advertising = phydev->supported;
- pdata->phy_dev = phydev;
pdata->last_duplex = -1;
pdata->last_carrier = -1;
@@ -1073,7 +1071,6 @@ static int smsc911x_mii_init(struct platform_device *pdev,
pdata->mii_bus->priv = pdata;
pdata->mii_bus->read = smsc911x_mii_read;
pdata->mii_bus->write = smsc911x_mii_write;
- memcpy(pdata->mii_bus->irq, pdata->phy_irq, sizeof(pdata->mii_bus));
pdata->mii_bus->parent = &pdev->dev;
@@ -1102,15 +1099,8 @@ static int smsc911x_mii_init(struct platform_device *pdev,
goto err_out_free_bus_2;
}
- if (smsc911x_mii_probe(dev) < 0) {
- SMSC_WARN(pdata, probe, "Error registering mii bus");
- goto err_out_unregister_bus_3;
- }
-
return 0;
-err_out_unregister_bus_3:
- mdiobus_unregister(pdata->mii_bus);
err_out_free_bus_2:
mdiobus_free(pdata->mii_bus);
err_out_1:
@@ -1340,9 +1330,11 @@ static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata)
static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata)
{
+ struct net_device *ndev = pdata->dev;
+ struct phy_device *phy_dev = ndev->phydev;
int rc = 0;
- if (!pdata->phy_dev)
+ if (!phy_dev)
return rc;
/* If the internal PHY is in General Power-Down mode, all, except the
@@ -1352,7 +1344,7 @@ static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata)
* In that case, clear the bit 0.11, so the PHY powers up and we can
* access to the phy registers.
*/
- rc = phy_read(pdata->phy_dev, MII_BMCR);
+ rc = phy_read(phy_dev, MII_BMCR);
if (rc < 0) {
SMSC_WARN(pdata, drv, "Failed reading PHY control reg");
return rc;
@@ -1362,7 +1354,7 @@ static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata)
* disable the general power down-mode.
*/
if (rc & BMCR_PDOWN) {
- rc = phy_write(pdata->phy_dev, MII_BMCR, rc & ~BMCR_PDOWN);
+ rc = phy_write(phy_dev, MII_BMCR, rc & ~BMCR_PDOWN);
if (rc < 0) {
SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
return rc;
@@ -1376,12 +1368,14 @@ static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata)
static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
{
+ struct net_device *ndev = pdata->dev;
+ struct phy_device *phy_dev = ndev->phydev;
int rc = 0;
- if (!pdata->phy_dev)
+ if (!phy_dev)
return rc;
- rc = phy_read(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS);
+ rc = phy_read(phy_dev, MII_LAN83C185_CTRL_STATUS);
if (rc < 0) {
SMSC_WARN(pdata, drv, "Failed reading PHY control reg");
@@ -1391,7 +1385,7 @@ static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
/* Only disable if energy detect mode is already enabled */
if (rc & MII_LAN83C185_EDPWRDOWN) {
/* Disable energy detect mode for this SMSC Transceivers */
- rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS,
+ rc = phy_write(phy_dev, MII_LAN83C185_CTRL_STATUS,
rc & (~MII_LAN83C185_EDPWRDOWN));
if (rc < 0) {
@@ -1407,12 +1401,14 @@ static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata)
{
+ struct net_device *ndev = pdata->dev;
+ struct phy_device *phy_dev = ndev->phydev;
int rc = 0;
- if (!pdata->phy_dev)
+ if (!phy_dev)
return rc;
- rc = phy_read(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS);
+ rc = phy_read(phy_dev, MII_LAN83C185_CTRL_STATUS);
if (rc < 0) {
SMSC_WARN(pdata, drv, "Failed reading PHY control reg");
@@ -1422,7 +1418,7 @@ static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata)
/* Only enable if energy detect mode is already disabled */
if (!(rc & MII_LAN83C185_EDPWRDOWN)) {
/* Enable energy detect mode for this SMSC Transceivers */
- rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS,
+ rc = phy_write(phy_dev, MII_LAN83C185_CTRL_STATUS,
rc | MII_LAN83C185_EDPWRDOWN);
if (rc < 0) {
@@ -1511,23 +1507,90 @@ static void smsc911x_disable_irq_chip(struct net_device *dev)
smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF);
}
+static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ u32 intsts = smsc911x_reg_read(pdata, INT_STS);
+ u32 inten = smsc911x_reg_read(pdata, INT_EN);
+ int serviced = IRQ_NONE;
+ u32 temp;
+
+ if (unlikely(intsts & inten & INT_STS_SW_INT_)) {
+ temp = smsc911x_reg_read(pdata, INT_EN);
+ temp &= (~INT_EN_SW_INT_EN_);
+ smsc911x_reg_write(pdata, INT_EN, temp);
+ smsc911x_reg_write(pdata, INT_STS, INT_STS_SW_INT_);
+ pdata->software_irq_signal = 1;
+ smp_wmb();
+ serviced = IRQ_HANDLED;
+ }
+
+ if (unlikely(intsts & inten & INT_STS_RXSTOP_INT_)) {
+ /* Called when there is a multicast update scheduled and
+ * it is now safe to complete the update */
+ SMSC_TRACE(pdata, intr, "RX Stop interrupt");
+ smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_);
+ if (pdata->multicast_update_pending)
+ smsc911x_rx_multicast_update_workaround(pdata);
+ serviced = IRQ_HANDLED;
+ }
+
+ if (intsts & inten & INT_STS_TDFA_) {
+ temp = smsc911x_reg_read(pdata, FIFO_INT);
+ temp |= FIFO_INT_TX_AVAIL_LEVEL_;
+ smsc911x_reg_write(pdata, FIFO_INT, temp);
+ smsc911x_reg_write(pdata, INT_STS, INT_STS_TDFA_);
+ netif_wake_queue(dev);
+ serviced = IRQ_HANDLED;
+ }
+
+ if (unlikely(intsts & inten & INT_STS_RXE_)) {
+ SMSC_TRACE(pdata, intr, "RX Error interrupt");
+ smsc911x_reg_write(pdata, INT_STS, INT_STS_RXE_);
+ serviced = IRQ_HANDLED;
+ }
+
+ if (likely(intsts & inten & INT_STS_RSFL_)) {
+ if (likely(napi_schedule_prep(&pdata->napi))) {
+ /* Disable Rx interrupts */
+ temp = smsc911x_reg_read(pdata, INT_EN);
+ temp &= (~INT_EN_RSFL_EN_);
+ smsc911x_reg_write(pdata, INT_EN, temp);
+ /* Schedule a NAPI poll */
+ __napi_schedule(&pdata->napi);
+ } else {
+ SMSC_WARN(pdata, rx_err, "napi_schedule_prep failed");
+ }
+ serviced = IRQ_HANDLED;
+ }
+
+ return serviced;
+}
+
static int smsc911x_open(struct net_device *dev)
{
struct smsc911x_data *pdata = netdev_priv(dev);
unsigned int timeout;
unsigned int temp;
unsigned int intcfg;
+ int retval;
+ int irq_flags;
- /* if the phy is not yet registered, retry later*/
- if (!pdata->phy_dev) {
- SMSC_WARN(pdata, hw, "phy_dev is NULL");
- return -EAGAIN;
+ /* find and start the given phy */
+ if (!dev->phydev) {
+ retval = smsc911x_mii_probe(dev);
+ if (retval < 0) {
+ SMSC_WARN(pdata, probe, "Error starting phy");
+ goto out;
+ }
}
/* Reset the LAN911x */
- if (smsc911x_soft_reset(pdata)) {
+ retval = smsc911x_soft_reset(pdata);
+ if (retval) {
SMSC_WARN(pdata, hw, "soft reset failed");
- return -EIO;
+ goto mii_free_out;
}
smsc911x_reg_write(pdata, HW_CFG, 0x00050000);
@@ -1583,6 +1646,15 @@ static int smsc911x_open(struct net_device *dev)
pdata->software_irq_signal = 0;
smp_wmb();
+ irq_flags = irq_get_trigger_type(dev->irq);
+ retval = request_irq(dev->irq, smsc911x_irqhandler,
+ irq_flags | IRQF_SHARED, dev->name, dev);
+ if (retval) {
+ SMSC_WARN(pdata, probe,
+ "Unable to claim requested irq: %d", dev->irq);
+ goto mii_free_out;
+ }
+
temp = smsc911x_reg_read(pdata, INT_EN);
temp |= INT_EN_SW_INT_EN_;
smsc911x_reg_write(pdata, INT_EN, temp);
@@ -1597,7 +1669,8 @@ static int smsc911x_open(struct net_device *dev)
if (!pdata->software_irq_signal) {
netdev_warn(dev, "ISR failed signaling test (IRQ %d)\n",
dev->irq);
- return -ENODEV;
+ retval = -ENODEV;
+ goto irq_stop_out;
}
SMSC_TRACE(pdata, ifup, "IRQ handler passed test using IRQ %d",
dev->irq);
@@ -1610,7 +1683,7 @@ static int smsc911x_open(struct net_device *dev)
pdata->last_carrier = -1;
/* Bring the PHY up */
- phy_start(pdata->phy_dev);
+ phy_start(dev->phydev);
temp = smsc911x_reg_read(pdata, HW_CFG);
/* Preserve TX FIFO size and external PHY configuration */
@@ -1643,6 +1716,14 @@ static int smsc911x_open(struct net_device *dev)
netif_start_queue(dev);
return 0;
+
+irq_stop_out:
+ free_irq(dev->irq, dev);
+mii_free_out:
+ phy_disconnect(dev->phydev);
+ dev->phydev = NULL;
+out:
+ return retval;
}
/* Entry point for stopping the interface */
@@ -1664,9 +1745,15 @@ static int smsc911x_stop(struct net_device *dev)
dev->stats.rx_dropped += smsc911x_reg_read(pdata, RX_DROP);
smsc911x_tx_update_txcounters(dev);
+ free_irq(dev->irq, dev);
+
/* Bring the PHY down */
- if (pdata->phy_dev)
- phy_stop(pdata->phy_dev);
+ if (dev->phydev) {
+ phy_stop(dev->phydev);
+ phy_disconnect(dev->phydev);
+ dev->phydev = NULL;
+ }
+ netif_carrier_off(dev);
SMSC_TRACE(pdata, ifdown, "Interface stopped");
return 0;
@@ -1808,67 +1895,6 @@ static void smsc911x_set_multicast_list(struct net_device *dev)
spin_unlock_irqrestore(&pdata->mac_lock, flags);
}
-static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct smsc911x_data *pdata = netdev_priv(dev);
- u32 intsts = smsc911x_reg_read(pdata, INT_STS);
- u32 inten = smsc911x_reg_read(pdata, INT_EN);
- int serviced = IRQ_NONE;
- u32 temp;
-
- if (unlikely(intsts & inten & INT_STS_SW_INT_)) {
- temp = smsc911x_reg_read(pdata, INT_EN);
- temp &= (~INT_EN_SW_INT_EN_);
- smsc911x_reg_write(pdata, INT_EN, temp);
- smsc911x_reg_write(pdata, INT_STS, INT_STS_SW_INT_);
- pdata->software_irq_signal = 1;
- smp_wmb();
- serviced = IRQ_HANDLED;
- }
-
- if (unlikely(intsts & inten & INT_STS_RXSTOP_INT_)) {
- /* Called when there is a multicast update scheduled and
- * it is now safe to complete the update */
- SMSC_TRACE(pdata, intr, "RX Stop interrupt");
- smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_);
- if (pdata->multicast_update_pending)
- smsc911x_rx_multicast_update_workaround(pdata);
- serviced = IRQ_HANDLED;
- }
-
- if (intsts & inten & INT_STS_TDFA_) {
- temp = smsc911x_reg_read(pdata, FIFO_INT);
- temp |= FIFO_INT_TX_AVAIL_LEVEL_;
- smsc911x_reg_write(pdata, FIFO_INT, temp);
- smsc911x_reg_write(pdata, INT_STS, INT_STS_TDFA_);
- netif_wake_queue(dev);
- serviced = IRQ_HANDLED;
- }
-
- if (unlikely(intsts & inten & INT_STS_RXE_)) {
- SMSC_TRACE(pdata, intr, "RX Error interrupt");
- smsc911x_reg_write(pdata, INT_STS, INT_STS_RXE_);
- serviced = IRQ_HANDLED;
- }
-
- if (likely(intsts & inten & INT_STS_RSFL_)) {
- if (likely(napi_schedule_prep(&pdata->napi))) {
- /* Disable Rx interrupts */
- temp = smsc911x_reg_read(pdata, INT_EN);
- temp &= (~INT_EN_RSFL_EN_);
- smsc911x_reg_write(pdata, INT_EN, temp);
- /* Schedule a NAPI poll */
- __napi_schedule(&pdata->napi);
- } else {
- SMSC_WARN(pdata, rx_err, "napi_schedule_prep failed");
- }
- serviced = IRQ_HANDLED;
- }
-
- return serviced;
-}
-
#ifdef CONFIG_NET_POLL_CONTROLLER
static void smsc911x_poll_controller(struct net_device *dev)
{
@@ -1906,30 +1932,10 @@ static int smsc911x_set_mac_address(struct net_device *dev, void *p)
/* Standard ioctls for mii-tool */
static int smsc911x_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- struct smsc911x_data *pdata = netdev_priv(dev);
-
- if (!netif_running(dev) || !pdata->phy_dev)
+ if (!netif_running(dev) || !dev->phydev)
return -EINVAL;
- return phy_mii_ioctl(pdata->phy_dev, ifr, cmd);
-}
-
-static int
-smsc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct smsc911x_data *pdata = netdev_priv(dev);
-
- cmd->maxtxpkt = 1;
- cmd->maxrxpkt = 1;
- return phy_ethtool_gset(pdata->phy_dev, cmd);
-}
-
-static int
-smsc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct smsc911x_data *pdata = netdev_priv(dev);
-
- return phy_ethtool_sset(pdata->phy_dev, cmd);
+ return phy_mii_ioctl(dev->phydev, ifr, cmd);
}
static void smsc911x_ethtool_getdrvinfo(struct net_device *dev,
@@ -1943,9 +1949,7 @@ static void smsc911x_ethtool_getdrvinfo(struct net_device *dev,
static int smsc911x_ethtool_nwayreset(struct net_device *dev)
{
- struct smsc911x_data *pdata = netdev_priv(dev);
-
- return phy_start_aneg(pdata->phy_dev);
+ return phy_start_aneg(dev->phydev);
}
static u32 smsc911x_ethtool_getmsglevel(struct net_device *dev)
@@ -1971,7 +1975,7 @@ smsc911x_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs,
void *buf)
{
struct smsc911x_data *pdata = netdev_priv(dev);
- struct phy_device *phy_dev = pdata->phy_dev;
+ struct phy_device *phy_dev = dev->phydev;
unsigned long flags;
unsigned int i;
unsigned int j = 0;
@@ -2117,8 +2121,6 @@ static int smsc911x_ethtool_set_eeprom(struct net_device *dev,
}
static const struct ethtool_ops smsc911x_ethtool_ops = {
- .get_settings = smsc911x_ethtool_getsettings,
- .set_settings = smsc911x_ethtool_setsettings,
.get_link = ethtool_op_get_link,
.get_drvinfo = smsc911x_ethtool_getdrvinfo,
.nway_reset = smsc911x_ethtool_nwayreset,
@@ -2130,6 +2132,8 @@ static const struct ethtool_ops smsc911x_ethtool_ops = {
.get_eeprom = smsc911x_ethtool_get_eeprom,
.set_eeprom = smsc911x_ethtool_set_eeprom,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static const struct net_device_ops smsc911x_netdev_ops = {
@@ -2310,17 +2314,14 @@ static int smsc911x_drv_remove(struct platform_device *pdev)
pdata = netdev_priv(dev);
BUG_ON(!pdata);
BUG_ON(!pdata->ioaddr);
- BUG_ON(!pdata->phy_dev);
+ WARN_ON(dev->phydev);
SMSC_TRACE(pdata, ifdown, "Stopping driver");
- phy_disconnect(pdata->phy_dev);
- pdata->phy_dev = NULL;
mdiobus_unregister(pdata->mii_bus);
mdiobus_free(pdata->mii_bus);
unregister_netdev(dev);
- free_irq(dev->irq, dev);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"smsc911x-memory");
if (!res)
@@ -2405,8 +2406,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
struct smsc911x_data *pdata;
struct smsc911x_platform_config *config = dev_get_platdata(&pdev->dev);
struct resource *res;
- unsigned int intcfg = 0;
- int res_size, irq, irq_flags;
+ int res_size, irq;
int retval;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -2445,7 +2445,6 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
pdata = netdev_priv(dev);
dev->irq = irq;
- irq_flags = irq_get_trigger_type(irq);
pdata->ioaddr = ioremap_nocache(res->start, res_size);
pdata->dev = dev;
@@ -2492,43 +2491,23 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
if (retval < 0)
goto out_disable_resources;
- /* configure irq polarity and type before connecting isr */
- if (pdata->config.irq_polarity == SMSC911X_IRQ_POLARITY_ACTIVE_HIGH)
- intcfg |= INT_CFG_IRQ_POL_;
-
- if (pdata->config.irq_type == SMSC911X_IRQ_TYPE_PUSH_PULL)
- intcfg |= INT_CFG_IRQ_TYPE_;
-
- smsc911x_reg_write(pdata, INT_CFG, intcfg);
-
- /* Ensure interrupts are globally disabled before connecting ISR */
- smsc911x_disable_irq_chip(dev);
+ netif_carrier_off(dev);
- retval = request_irq(dev->irq, smsc911x_irqhandler,
- irq_flags | IRQF_SHARED, dev->name, dev);
+ retval = smsc911x_mii_init(pdev, dev);
if (retval) {
- SMSC_WARN(pdata, probe,
- "Unable to claim requested irq: %d", dev->irq);
+ SMSC_WARN(pdata, probe, "Error %i initialising mii", retval);
goto out_disable_resources;
}
- netif_carrier_off(dev);
-
retval = register_netdev(dev);
if (retval) {
SMSC_WARN(pdata, probe, "Error %i registering device", retval);
- goto out_free_irq;
+ goto out_disable_resources;
} else {
SMSC_TRACE(pdata, probe,
"Network interface: \"%s\"", dev->name);
}
- retval = smsc911x_mii_init(pdev, dev);
- if (retval) {
- SMSC_WARN(pdata, probe, "Error %i initialising mii", retval);
- goto out_unregister_netdev_5;
- }
-
spin_lock_irq(&pdata->mac_lock);
/* Check if mac address has been specified when bringing interface up */
@@ -2564,10 +2543,6 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
return 0;
-out_unregister_netdev_5:
- unregister_netdev(dev);
-out_free_irq:
- free_irq(dev->irq, dev);
out_disable_resources:
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index 8594b9e8b28b..b7bfed4bc96b 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -76,7 +76,6 @@ struct smsc9420_pdata {
bool rx_csum;
u32 msg_enable;
- struct phy_device *phy_dev;
struct mii_bus *mii_bus;
int last_duplex;
int last_carrier;
@@ -226,36 +225,10 @@ static int smsc9420_eeprom_reload(struct smsc9420_pdata *pd)
/* Standard ioctls for mii-tool */
static int smsc9420_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- struct smsc9420_pdata *pd = netdev_priv(dev);
-
- if (!netif_running(dev) || !pd->phy_dev)
+ if (!netif_running(dev) || !dev->phydev)
return -EINVAL;
- return phy_mii_ioctl(pd->phy_dev, ifr, cmd);
-}
-
-static int smsc9420_ethtool_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
-{
- struct smsc9420_pdata *pd = netdev_priv(dev);
-
- if (!pd->phy_dev)
- return -ENODEV;
-
- cmd->maxtxpkt = 1;
- cmd->maxrxpkt = 1;
- return phy_ethtool_gset(pd->phy_dev, cmd);
-}
-
-static int smsc9420_ethtool_set_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
-{
- struct smsc9420_pdata *pd = netdev_priv(dev);
-
- if (!pd->phy_dev)
- return -ENODEV;
-
- return phy_ethtool_sset(pd->phy_dev, cmd);
+ return phy_mii_ioctl(dev->phydev, ifr, cmd);
}
static void smsc9420_ethtool_get_drvinfo(struct net_device *netdev,
@@ -283,12 +256,10 @@ static void smsc9420_ethtool_set_msglevel(struct net_device *netdev, u32 data)
static int smsc9420_ethtool_nway_reset(struct net_device *netdev)
{
- struct smsc9420_pdata *pd = netdev_priv(netdev);
-
- if (!pd->phy_dev)
+ if (!netdev->phydev)
return -ENODEV;
- return phy_start_aneg(pd->phy_dev);
+ return phy_start_aneg(netdev->phydev);
}
static int smsc9420_ethtool_getregslen(struct net_device *dev)
@@ -302,7 +273,7 @@ smsc9420_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs,
void *buf)
{
struct smsc9420_pdata *pd = netdev_priv(dev);
- struct phy_device *phy_dev = pd->phy_dev;
+ struct phy_device *phy_dev = dev->phydev;
unsigned int i, j = 0;
u32 *data = buf;
@@ -443,8 +414,6 @@ static int smsc9420_ethtool_set_eeprom(struct net_device *dev,
}
static const struct ethtool_ops smsc9420_ethtool_ops = {
- .get_settings = smsc9420_ethtool_get_settings,
- .set_settings = smsc9420_ethtool_set_settings,
.get_drvinfo = smsc9420_ethtool_get_drvinfo,
.get_msglevel = smsc9420_ethtool_get_msglevel,
.set_msglevel = smsc9420_ethtool_set_msglevel,
@@ -456,6 +425,8 @@ static const struct ethtool_ops smsc9420_ethtool_ops = {
.get_regs_len = smsc9420_ethtool_getregslen,
.get_regs = smsc9420_ethtool_getregs,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
/* Sets the device MAC address to dev_addr */
@@ -736,7 +707,7 @@ static int smsc9420_stop(struct net_device *dev)
ulong flags;
BUG_ON(!pd);
- BUG_ON(!pd->phy_dev);
+ BUG_ON(!dev->phydev);
/* disable master interrupt */
spin_lock_irqsave(&pd->int_lock, flags);
@@ -757,10 +728,9 @@ static int smsc9420_stop(struct net_device *dev)
smsc9420_dmac_soft_reset(pd);
- phy_stop(pd->phy_dev);
+ phy_stop(dev->phydev);
- phy_disconnect(pd->phy_dev);
- pd->phy_dev = NULL;
+ phy_disconnect(dev->phydev);
mdiobus_unregister(pd->mii_bus);
mdiobus_free(pd->mii_bus);
@@ -1093,7 +1063,8 @@ static void smsc9420_set_multicast_list(struct net_device *dev)
static void smsc9420_phy_update_flowcontrol(struct smsc9420_pdata *pd)
{
- struct phy_device *phy_dev = pd->phy_dev;
+ struct net_device *dev = pd->dev;
+ struct phy_device *phy_dev = dev->phydev;
u32 flow;
if (phy_dev->duplex == DUPLEX_FULL) {
@@ -1122,7 +1093,7 @@ static void smsc9420_phy_update_flowcontrol(struct smsc9420_pdata *pd)
static void smsc9420_phy_adjust_link(struct net_device *dev)
{
struct smsc9420_pdata *pd = netdev_priv(dev);
- struct phy_device *phy_dev = pd->phy_dev;
+ struct phy_device *phy_dev = dev->phydev;
int carrier;
if (phy_dev->duplex != pd->last_duplex) {
@@ -1155,7 +1126,7 @@ static int smsc9420_mii_probe(struct net_device *dev)
struct smsc9420_pdata *pd = netdev_priv(dev);
struct phy_device *phydev = NULL;
- BUG_ON(pd->phy_dev);
+ BUG_ON(dev->phydev);
/* Device only supports internal PHY at address 1 */
phydev = mdiobus_get_phy(pd->mii_bus, 1);
@@ -1179,7 +1150,6 @@ static int smsc9420_mii_probe(struct net_device *dev)
phy_attached_info(phydev);
- pd->phy_dev = phydev;
pd->last_duplex = -1;
pd->last_carrier = -1;
@@ -1440,7 +1410,7 @@ static int smsc9420_open(struct net_device *dev)
}
/* Bring the PHY up */
- phy_start(pd->phy_dev);
+ phy_start(dev->phydev);
napi_enable(&pd->napi);
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index cec147d1d34f..8f06a6621ab1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -40,7 +40,7 @@ config DWMAC_GENERIC
config DWMAC_IPQ806X
tristate "QCA IPQ806x DWMAC support"
default ARCH_QCOM
- depends on OF
+ depends on OF && (ARCH_QCOM || COMPILE_TEST)
select MFD_SYSCON
help
Support for QCA IPQ806X DWMAC Ethernet.
@@ -53,7 +53,7 @@ config DWMAC_IPQ806X
config DWMAC_LPC18XX
tristate "NXP LPC18xx/43xx DWMAC support"
default ARCH_LPC18XX
- depends on OF
+ depends on OF && (ARCH_LPC18XX || COMPILE_TEST)
select MFD_SYSCON
---help---
Support for NXP LPC18xx/43xx DWMAC Ethernet.
@@ -61,7 +61,7 @@ config DWMAC_LPC18XX
config DWMAC_MESON
tristate "Amlogic Meson dwmac support"
default ARCH_MESON
- depends on OF
+ depends on OF && (ARCH_MESON || COMPILE_TEST)
help
Support for Ethernet controller on Amlogic Meson SoCs.
@@ -72,7 +72,7 @@ config DWMAC_MESON
config DWMAC_ROCKCHIP
tristate "Rockchip dwmac support"
default ARCH_ROCKCHIP
- depends on OF
+ depends on OF && (ARCH_ROCKCHIP || COMPILE_TEST)
select MFD_SYSCON
help
Support for Ethernet controller on Rockchip RK3288 SoC.
@@ -83,7 +83,7 @@ config DWMAC_ROCKCHIP
config DWMAC_SOCFPGA
tristate "SOCFPGA dwmac support"
default ARCH_SOCFPGA
- depends on OF
+ depends on OF && (ARCH_SOCFPGA || COMPILE_TEST)
select MFD_SYSCON
help
Support for ethernet controller on Altera SOCFPGA
@@ -95,7 +95,7 @@ config DWMAC_SOCFPGA
config DWMAC_STI
tristate "STi GMAC support"
default ARCH_STI
- depends on OF
+ depends on OF && (ARCH_STI || COMPILE_TEST)
select MFD_SYSCON
---help---
Support for ethernet controller on STi SOCs.
@@ -107,7 +107,7 @@ config DWMAC_STI
config DWMAC_SUNXI
tristate "Allwinner GMAC support"
default ARCH_SUNXI
- depends on OF
+ depends on OF && (ARCH_SUNXI || COMPILE_TEST)
---help---
Support for Allwinner A20/A31 GMAC ethernet controllers.
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index b3901616f4f6..44b630cd1755 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -2,7 +2,8 @@ obj-$(CONFIG_STMMAC_ETH) += stmmac.o
stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
- mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o $(stmmac-y)
+ mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o dwmac4_descs.o \
+ dwmac4_dma.o dwmac4_lib.o dwmac4_core.o $(stmmac-y)
# Ordering matters. Generic driver must be last.
obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o
@@ -10,11 +11,12 @@ obj-$(CONFIG_DWMAC_IPQ806X) += dwmac-ipq806x.o
obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o
obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o
obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o
-obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-socfpga.o
+obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o
obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o
obj-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
obj-$(CONFIG_DWMAC_GENERIC) += dwmac-generic.o
stmmac-platform-objs:= stmmac_platform.o
+dwmac-altr-socfpga-objs := altr_tse_pcs.o dwmac-socfpga.o
obj-$(CONFIG_STMMAC_PCI) += stmmac-pci.o
stmmac-pci-objs:= stmmac_pci.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
new file mode 100644
index 000000000000..2920e2ee3864
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
@@ -0,0 +1,274 @@
+/* Copyright Altera Corporation (C) 2016. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author: Tien Hock Loh <thloh@altera.com>
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/stmmac.h>
+
+#include "stmmac.h"
+#include "stmmac_platform.h"
+#include "altr_tse_pcs.h"
+
+#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0
+#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII BIT(1)
+#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII BIT(2)
+#define SYSMGR_EMACGRP_CTRL_PHYSEL_WIDTH 2
+#define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK GENMASK(1, 0)
+
+#define TSE_PCS_CONTROL_AN_EN_MASK BIT(12)
+#define TSE_PCS_CONTROL_REG 0x00
+#define TSE_PCS_CONTROL_RESTART_AN_MASK BIT(9)
+#define TSE_PCS_IF_MODE_REG 0x28
+#define TSE_PCS_LINK_TIMER_0_REG 0x24
+#define TSE_PCS_LINK_TIMER_1_REG 0x26
+#define TSE_PCS_SIZE 0x40
+#define TSE_PCS_STATUS_AN_COMPLETED_MASK BIT(5)
+#define TSE_PCS_STATUS_LINK_MASK 0x0004
+#define TSE_PCS_STATUS_REG 0x02
+#define TSE_PCS_SGMII_SPEED_1000 BIT(3)
+#define TSE_PCS_SGMII_SPEED_100 BIT(2)
+#define TSE_PCS_SGMII_SPEED_10 0x0
+#define TSE_PCS_SW_RST_MASK 0x8000
+#define TSE_PCS_PARTNER_ABILITY_REG 0x0A
+#define TSE_PCS_PARTNER_DUPLEX_FULL 0x1000
+#define TSE_PCS_PARTNER_DUPLEX_HALF 0x0000
+#define TSE_PCS_PARTNER_DUPLEX_MASK 0x1000
+#define TSE_PCS_PARTNER_SPEED_MASK GENMASK(11, 10)
+#define TSE_PCS_PARTNER_SPEED_1000 BIT(11)
+#define TSE_PCS_PARTNER_SPEED_100 BIT(10)
+#define TSE_PCS_PARTNER_SPEED_10 0x0000
+#define TSE_PCS_SGMII_SPEED_MASK GENMASK(3, 2)
+#define TSE_PCS_SGMII_LINK_TIMER_0 0x0D40
+#define TSE_PCS_SGMII_LINK_TIMER_1 0x0003
+#define TSE_PCS_SW_RESET_TIMEOUT 100
+#define TSE_PCS_USE_SGMII_AN_MASK BIT(2)
+#define TSE_PCS_USE_SGMII_ENA BIT(1)
+
+#define SGMII_ADAPTER_CTRL_REG 0x00
+#define SGMII_ADAPTER_DISABLE 0x0001
+#define SGMII_ADAPTER_ENABLE 0x0000
+
+#define AUTONEGO_LINK_TIMER 20
+
+static int tse_pcs_reset(void __iomem *base, struct tse_pcs *pcs)
+{
+ int counter = 0;
+ u16 val;
+
+ val = readw(base + TSE_PCS_CONTROL_REG);
+ val |= TSE_PCS_SW_RST_MASK;
+ writew(val, base + TSE_PCS_CONTROL_REG);
+
+ while (counter < TSE_PCS_SW_RESET_TIMEOUT) {
+ val = readw(base + TSE_PCS_CONTROL_REG);
+ val &= TSE_PCS_SW_RST_MASK;
+ if (val == 0)
+ break;
+ counter++;
+ udelay(1);
+ }
+ if (counter >= TSE_PCS_SW_RESET_TIMEOUT) {
+ dev_err(pcs->dev, "PCS could not get out of sw reset\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
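
The loop above is an open-coded register poll with a timeout. Because it can run from a timer callback (see aneg_link_timer_callback below), sleeping is not allowed there; the equivalent using the atomic iopoll helper, which busy-waits with udelay(), would look like the following sketch (not part of the patch):

#include <linux/iopoll.h>

/* sketch: the same reset wait expressed with the iopoll helper */
static int tse_pcs_reset_alt(void __iomem *base)
{
	u16 val;

	val = readw(base + TSE_PCS_CONTROL_REG);
	writew(val | TSE_PCS_SW_RST_MASK, base + TSE_PCS_CONTROL_REG);

	/* delay 1us per try, time out after TSE_PCS_SW_RESET_TIMEOUT us */
	return readw_poll_timeout_atomic(base + TSE_PCS_CONTROL_REG, val,
					 !(val & TSE_PCS_SW_RST_MASK),
					 1, TSE_PCS_SW_RESET_TIMEOUT);
}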
+
+int tse_pcs_init(void __iomem *base, struct tse_pcs *pcs)
+{
+ int ret = 0;
+
+ writew(TSE_PCS_USE_SGMII_ENA, base + TSE_PCS_IF_MODE_REG);
+
+ writew(TSE_PCS_SGMII_LINK_TIMER_0, base + TSE_PCS_LINK_TIMER_0_REG);
+ writew(TSE_PCS_SGMII_LINK_TIMER_1, base + TSE_PCS_LINK_TIMER_1_REG);
+
+ ret = tse_pcs_reset(base, pcs);
+ if (ret == 0)
+ writew(SGMII_ADAPTER_ENABLE,
+ pcs->sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
+
+ return ret;
+}
+
+static void pcs_link_timer_callback(unsigned long data)
+{
+ u16 val = 0;
+ struct tse_pcs *pcs = (struct tse_pcs *)data;
+ void __iomem *tse_pcs_base = pcs->tse_pcs_base;
+ void __iomem *sgmii_adapter_base = pcs->sgmii_adapter_base;
+
+ val = readw(tse_pcs_base + TSE_PCS_STATUS_REG);
+ val &= TSE_PCS_STATUS_LINK_MASK;
+
+ if (val != 0) {
+ dev_dbg(pcs->dev, "Adapter: Link is established\n");
+ writew(SGMII_ADAPTER_ENABLE,
+ sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
+ } else {
+ mod_timer(&pcs->aneg_link_timer, jiffies +
+ msecs_to_jiffies(AUTONEGO_LINK_TIMER));
+ }
+}
+
+static void auto_nego_timer_callback(unsigned long data)
+{
+ u16 val = 0;
+ u16 speed = 0;
+ u16 duplex = 0;
+ struct tse_pcs *pcs = (struct tse_pcs *)data;
+ void __iomem *tse_pcs_base = pcs->tse_pcs_base;
+ void __iomem *sgmii_adapter_base = pcs->sgmii_adapter_base;
+
+ val = readw(tse_pcs_base + TSE_PCS_STATUS_REG);
+ val &= TSE_PCS_STATUS_AN_COMPLETED_MASK;
+
+ if (val != 0) {
+ dev_dbg(pcs->dev, "Adapter: Auto Negotiation is completed\n");
+ val = readw(tse_pcs_base + TSE_PCS_PARTNER_ABILITY_REG);
+ speed = val & TSE_PCS_PARTNER_SPEED_MASK;
+ duplex = val & TSE_PCS_PARTNER_DUPLEX_MASK;
+
+ if (speed == TSE_PCS_PARTNER_SPEED_10 &&
+ duplex == TSE_PCS_PARTNER_DUPLEX_FULL)
+ dev_dbg(pcs->dev,
+ "Adapter: Link Partner is Up - 10/Full\n");
+ else if (speed == TSE_PCS_PARTNER_SPEED_100 &&
+ duplex == TSE_PCS_PARTNER_DUPLEX_FULL)
+ dev_dbg(pcs->dev,
+ "Adapter: Link Partner is Up - 100/Full\n");
+ else if (speed == TSE_PCS_PARTNER_SPEED_1000 &&
+ duplex == TSE_PCS_PARTNER_DUPLEX_FULL)
+ dev_dbg(pcs->dev,
+ "Adapter: Link Partner is Up - 1000/Full\n");
+ else if (speed == TSE_PCS_PARTNER_SPEED_10 &&
+ duplex == TSE_PCS_PARTNER_DUPLEX_HALF)
+ dev_err(pcs->dev,
+ "Adapter does not support Half Duplex\n");
+ else if (speed == TSE_PCS_PARTNER_SPEED_100 &&
+ duplex == TSE_PCS_PARTNER_DUPLEX_HALF)
+ dev_err(pcs->dev,
+ "Adapter does not support Half Duplex\n");
+ else if (speed == TSE_PCS_PARTNER_SPEED_1000 &&
+ duplex == TSE_PCS_PARTNER_DUPLEX_HALF)
+ dev_err(pcs->dev,
+ "Adapter does not support Half Duplex\n");
+ else
+ dev_err(pcs->dev,
+ "Adapter: Invalid Partner Speed and Duplex\n");
+
+ if (duplex == TSE_PCS_PARTNER_DUPLEX_FULL &&
+ (speed == TSE_PCS_PARTNER_SPEED_10 ||
+ speed == TSE_PCS_PARTNER_SPEED_100 ||
+ speed == TSE_PCS_PARTNER_SPEED_1000))
+ writew(SGMII_ADAPTER_ENABLE,
+ sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
+ } else {
+ val = readw(tse_pcs_base + TSE_PCS_CONTROL_REG);
+ val |= TSE_PCS_CONTROL_RESTART_AN_MASK;
+ writew(val, tse_pcs_base + TSE_PCS_CONTROL_REG);
+
+ tse_pcs_reset(tse_pcs_base, pcs);
+ mod_timer(&pcs->aneg_link_timer, jiffies +
+ msecs_to_jiffies(AUTONEGO_LINK_TIMER));
+ }
+}
+
+static void aneg_link_timer_callback(unsigned long data)
+{
+ struct tse_pcs *pcs = (struct tse_pcs *)data;
+
+ if (pcs->autoneg == AUTONEG_ENABLE)
+ auto_nego_timer_callback(data);
+ else if (pcs->autoneg == AUTONEG_DISABLE)
+ pcs_link_timer_callback(data);
+}
+
+void tse_pcs_fix_mac_speed(struct tse_pcs *pcs, struct phy_device *phy_dev,
+ unsigned int speed)
+{
+ void __iomem *tse_pcs_base = pcs->tse_pcs_base;
+ void __iomem *sgmii_adapter_base = pcs->sgmii_adapter_base;
+ u32 val;
+
+ writew(SGMII_ADAPTER_ENABLE,
+ sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
+
+ pcs->autoneg = phy_dev->autoneg;
+
+ if (phy_dev->autoneg == AUTONEG_ENABLE) {
+ val = readw(tse_pcs_base + TSE_PCS_CONTROL_REG);
+ val |= TSE_PCS_CONTROL_AN_EN_MASK;
+ writew(val, tse_pcs_base + TSE_PCS_CONTROL_REG);
+
+ val = readw(tse_pcs_base + TSE_PCS_IF_MODE_REG);
+ val |= TSE_PCS_USE_SGMII_AN_MASK;
+ writew(val, tse_pcs_base + TSE_PCS_IF_MODE_REG);
+
+ val = readw(tse_pcs_base + TSE_PCS_CONTROL_REG);
+ val |= TSE_PCS_CONTROL_RESTART_AN_MASK;
+ writew(val, tse_pcs_base + TSE_PCS_CONTROL_REG);
+
+ tse_pcs_reset(tse_pcs_base, pcs);
+
+ setup_timer(&pcs->aneg_link_timer,
+ aneg_link_timer_callback, (unsigned long)pcs);
+ mod_timer(&pcs->aneg_link_timer, jiffies +
+ msecs_to_jiffies(AUTONEGO_LINK_TIMER));
+ } else if (phy_dev->autoneg == AUTONEG_DISABLE) {
+ val = readw(tse_pcs_base + TSE_PCS_CONTROL_REG);
+ val &= ~TSE_PCS_CONTROL_AN_EN_MASK;
+ writew(val, tse_pcs_base + TSE_PCS_CONTROL_REG);
+
+ val = readw(tse_pcs_base + TSE_PCS_IF_MODE_REG);
+ val &= ~TSE_PCS_USE_SGMII_AN_MASK;
+ writew(val, tse_pcs_base + TSE_PCS_IF_MODE_REG);
+
+ val = readw(tse_pcs_base + TSE_PCS_IF_MODE_REG);
+ val &= ~TSE_PCS_SGMII_SPEED_MASK;
+
+ switch (speed) {
+ case 1000:
+ val |= TSE_PCS_SGMII_SPEED_1000;
+ break;
+ case 100:
+ val |= TSE_PCS_SGMII_SPEED_100;
+ break;
+ case 10:
+ val |= TSE_PCS_SGMII_SPEED_10;
+ break;
+ default:
+ return;
+ }
+ writew(val, tse_pcs_base + TSE_PCS_IF_MODE_REG);
+
+ tse_pcs_reset(tse_pcs_base, pcs);
+
+ setup_timer(&pcs->aneg_link_timer,
+ aneg_link_timer_callback, (unsigned long)pcs);
+ mod_timer(&pcs->aneg_link_timer, jiffies +
+ msecs_to_jiffies(AUTONEGO_LINK_TIMER));
+ }
+}
diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h
new file mode 100644
index 000000000000..2f5882450b06
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h
@@ -0,0 +1,36 @@
+/* Copyright Altera Corporation (C) 2016. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author: Tien Hock Loh <thloh@altera.com>
+ */
+
+#ifndef __TSE_PCS_H__
+#define __TSE_PCS_H__
+
+#include <linux/phy.h>
+#include <linux/timer.h>
+
+struct tse_pcs {
+ struct device *dev;
+ void __iomem *tse_pcs_base;
+ void __iomem *sgmii_adapter_base;
+ struct timer_list aneg_link_timer;
+ int autoneg;
+};
+
+int tse_pcs_init(void __iomem *base, struct tse_pcs *pcs);
+void tse_pcs_fix_mac_speed(struct tse_pcs *pcs, struct phy_device *phy_dev,
+ unsigned int speed);
+
+#endif /* __TSE_PCS_H__ */
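
Taken together, the two entry points form the whole contract of this helper: map the PCS and SGMII adapter registers, call tse_pcs_init() once, and forward every MAC speed change to tse_pcs_fix_mac_speed(). The dwmac-socfpga changes later in this patch do exactly that; a condensed sketch with hypothetical names:

/* sketch: a glue driver consuming the TSE PCS API (names hypothetical) */
static int example_pcs_attach(struct device *dev, struct tse_pcs *pcs)
{
	pcs->dev = dev;
	/* tse_pcs_base and sgmii_adapter_base must already be ioremapped
	 * from the gmii-to-sgmii converter's register resources */
	return tse_pcs_init(pcs->tse_pcs_base, pcs);
}

/* hooked into the MAC driver's fix_mac_speed callback */
static void example_fix_mac_speed(void *priv, unsigned int speed)
{
	struct tse_pcs *pcs = priv;
	struct net_device *ndev = dev_get_drvdata(pcs->dev);

	tse_pcs_fix_mac_speed(pcs, ndev->phydev, speed);
}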
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index f96d257308b0..2533b91f1421 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -41,6 +41,8 @@
/* Synopsys Core versions */
#define DWMAC_CORE_3_40 0x34
#define DWMAC_CORE_3_50 0x35
+#define DWMAC_CORE_4_00 0x40
+#define STMMAC_CHAN0 0 /* Always supported and default for all chips */
#define DMA_TX_SIZE 512
#define DMA_RX_SIZE 512
@@ -167,6 +169,9 @@ struct stmmac_extra_stats {
unsigned long mtl_rx_fifo_ctrl_active;
unsigned long mac_rx_frame_ctrl_fifo;
unsigned long mac_gmii_rx_proto_engine;
+ /* TSO */
+ unsigned long tx_tso_frames;
+ unsigned long tx_tso_nfrags;
};
/* CSR Frequency Access Defines*/
@@ -227,6 +232,11 @@ struct stmmac_extra_stats {
#define DMA_HW_FEAT_ACTPHYIF 0x70000000 /* Active/selected PHY iface */
#define DEFAULT_DMA_PBL 8
+/* PCS status and mask defines */
+#define PCS_ANE_IRQ BIT(2) /* PCS Auto-Negotiation */
+#define PCS_LINK_IRQ BIT(1) /* PCS Link */
+#define PCS_RGSMIIIS_IRQ BIT(0) /* RGMII or SMII Interrupt */
+
/* Max/Min RI Watchdog Timer count value */
#define MAX_DMA_RIWT 0xff
#define MIN_DMA_RIWT 0x20
@@ -243,6 +253,7 @@ enum rx_frame_status {
csum_none = 0x2,
llc_snap = 0x4,
dma_own = 0x8,
+ rx_not_ls = 0x10,
};
/* Tx status */
@@ -266,9 +277,7 @@ enum dma_irq_status {
#define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 2)
#define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 3)
-#define CORE_PCS_ANE_COMPLETE (1 << 5)
-#define CORE_PCS_LINK_STATUS (1 << 6)
-#define CORE_RGMII_IRQ (1 << 7)
+#define CORE_IRQ_MTL_RX_OVERFLOW BIT(8)
/* Physical Coding Sublayer */
struct rgmii_adv {
@@ -300,8 +309,10 @@ struct dma_features {
/* 802.3az - Energy-Efficient Ethernet (EEE) */
unsigned int eee;
unsigned int av;
+ unsigned int tsoen;
/* TX and RX csum */
unsigned int tx_coe;
+ unsigned int rx_coe;
unsigned int rx_coe_type1;
unsigned int rx_coe_type2;
unsigned int rxfifo_over_2048;
@@ -348,6 +359,10 @@ struct stmmac_desc_ops {
void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len,
bool csum_flag, int mode, bool tx_own,
bool ls);
+ void (*prepare_tso_tx_desc)(struct dma_desc *p, int is_fs, int len1,
+ int len2, bool tx_own, bool ls,
+ unsigned int tcphdrlen,
+ unsigned int tcppayloadlen);
/* Set/get the owner of the descriptor */
void (*set_tx_owner) (struct dma_desc *p);
int (*get_tx_owner) (struct dma_desc *p);
@@ -380,6 +395,10 @@ struct stmmac_desc_ops {
u64(*get_timestamp) (void *desc, u32 ats);
/* get rx timestamp status */
int (*get_rx_timestamp_status) (void *desc, u32 ats);
+ /* Display ring */
+ void (*display_ring)(void *head, unsigned int size, bool rx);
+ /* set MSS via context descriptor */
+ void (*set_mss)(struct dma_desc *p, unsigned int mss);
};
extern const struct stmmac_desc_ops enh_desc_ops;
@@ -412,9 +431,15 @@ struct stmmac_dma_ops {
int (*dma_interrupt) (void __iomem *ioaddr,
struct stmmac_extra_stats *x);
/* If supported then get the optional core features */
- unsigned int (*get_hw_feature) (void __iomem *ioaddr);
+ void (*get_hw_feature)(void __iomem *ioaddr,
+ struct dma_features *dma_cap);
/* Program the HW RX Watchdog */
void (*rx_watchdog) (void __iomem *ioaddr, u32 riwt);
+ void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len);
+ void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len);
+ void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
+ void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
+ void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan);
};
struct mac_device_info;
@@ -446,9 +471,12 @@ struct stmmac_ops {
void (*reset_eee_mode)(struct mac_device_info *hw);
void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw);
void (*set_eee_pls)(struct mac_device_info *hw, int link);
- void (*ctrl_ane)(struct mac_device_info *hw, bool restart);
- void (*get_adv)(struct mac_device_info *hw, struct rgmii_adv *adv);
void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x);
+ /* PCS calls */
+ void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral,
+ bool loopback);
+ void (*pcs_rane)(void __iomem *ioaddr, bool restart);
+ void (*pcs_get_adv_lp)(void __iomem *ioaddr, struct rgmii_adv *adv);
};
/* PTP and HW Timer helpers */
@@ -463,6 +491,7 @@ struct stmmac_hwtimestamp {
};
extern const struct stmmac_hwtimestamp stmmac_ptp;
+extern const struct stmmac_mode_ops dwmac4_ring_mode_ops;
struct mac_link {
int port;
@@ -495,27 +524,59 @@ struct mac_device_info {
const struct stmmac_hwtimestamp *ptp;
struct mii_regs mii; /* MII register Addresses */
struct mac_link link;
- unsigned int synopsys_uid;
void __iomem *pcsr; /* vpointer to device CSRs */
int multicast_filter_bins;
int unicast_filter_entries;
int mcast_bits_log2;
unsigned int rx_csum;
+ unsigned int pcs;
+ unsigned int pmt;
+ unsigned int ps;
};
struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
- int perfect_uc_entries);
-struct mac_device_info *dwmac100_setup(void __iomem *ioaddr);
+ int perfect_uc_entries,
+ int *synopsys_id);
+struct mac_device_info *dwmac100_setup(void __iomem *ioaddr, int *synopsys_id);
+struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins,
+ int perfect_uc_entries, int *synopsys_id);
void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
unsigned int high, unsigned int low);
void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
unsigned int high, unsigned int low);
-
void stmmac_set_mac(void __iomem *ioaddr, bool enable);
+void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
+ unsigned int high, unsigned int low);
+void stmmac_dwmac4_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+ unsigned int high, unsigned int low);
+void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable);
+
void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
+
extern const struct stmmac_mode_ops ring_mode_ops;
extern const struct stmmac_mode_ops chain_mode_ops;
-
+extern const struct stmmac_desc_ops dwmac4_desc_ops;
+
+/**
+ * stmmac_get_synopsys_id - return the Synopsys ID.
+ * @hwid: value read from the HW core version register
+ * Description: decode and return the Synopsys ID (and log the user ID)
+ * from the HW core version register.
+ */
+static inline u32 stmmac_get_synopsys_id(u32 hwid)
+{
+ /* Check Synopsys Id (not available on old chips) */
+ if (likely(hwid)) {
+ u32 uid = ((hwid & 0x0000ff00) >> 8);
+ u32 synid = (hwid & 0x000000ff);
+
+ pr_info("stmmac - user ID: 0x%x, Synopsys ID: 0x%x\n",
+ uid, synid);
+
+ return synid;
+ }
+ return 0;
+}
#endif /* __COMMON_H__ */
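
The setup paths feed this helper with the raw contents of the version register, as dwmac1000_setup does later in this patch; a minimal sketch (not part of the patch):

/* sketch: reporting the core version during setup */
static u32 example_report_core(void __iomem *ioaddr)
{
	u32 hwid = readl(ioaddr + GMAC_VERSION);

	/* logs user ID and Synopsys ID, returns the Synopsys ID byte */
	return stmmac_get_synopsys_id(hwid);
}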
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 0cd3ecff768b..92105916ef40 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -46,6 +46,7 @@ struct rk_priv_data {
struct platform_device *pdev;
int phy_iface;
struct regulator *regulator;
+ bool suspended;
const struct rk_gmac_ops *ops;
bool clk_enabled;
@@ -72,6 +73,122 @@ struct rk_priv_data {
#define GRF_BIT(nr) (BIT(nr) | BIT(nr+16))
#define GRF_CLR_BIT(nr) (BIT(nr+16))
+#define RK3228_GRF_MAC_CON0 0x0900
+#define RK3228_GRF_MAC_CON1 0x0904
+
+/* RK3228_GRF_MAC_CON0 */
+#define RK3228_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7)
+#define RK3228_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+
+/* RK3228_GRF_MAC_CON1 */
+#define RK3228_GMAC_PHY_INTF_SEL_RGMII \
+ (GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6))
+#define RK3228_GMAC_PHY_INTF_SEL_RMII \
+ (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6))
+#define RK3228_GMAC_FLOW_CTRL GRF_BIT(3)
+#define RK3228_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
+#define RK3228_GMAC_SPEED_10M GRF_CLR_BIT(2)
+#define RK3228_GMAC_SPEED_100M GRF_BIT(2)
+#define RK3228_GMAC_RMII_CLK_25M GRF_BIT(7)
+#define RK3228_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(7)
+#define RK3228_GMAC_CLK_125M (GRF_CLR_BIT(8) | GRF_CLR_BIT(9))
+#define RK3228_GMAC_CLK_25M (GRF_BIT(8) | GRF_BIT(9))
+#define RK3228_GMAC_CLK_2_5M (GRF_CLR_BIT(8) | GRF_BIT(9))
+#define RK3228_GMAC_RMII_MODE GRF_BIT(10)
+#define RK3228_GMAC_RMII_MODE_CLR GRF_CLR_BIT(10)
+#define RK3228_GMAC_TXCLK_DLY_ENABLE GRF_BIT(0)
+#define RK3228_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(0)
+#define RK3228_GMAC_RXCLK_DLY_ENABLE GRF_BIT(1)
+#define RK3228_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(1)
+
+static void rk3228_set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
+ RK3228_GMAC_PHY_INTF_SEL_RGMII |
+ RK3228_GMAC_RMII_MODE_CLR |
+ RK3228_GMAC_RXCLK_DLY_ENABLE |
+ RK3228_GMAC_TXCLK_DLY_ENABLE);
+
+ regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON0,
+ RK3228_GMAC_CLK_RX_DL_CFG(rx_delay) |
+ RK3228_GMAC_CLK_TX_DL_CFG(tx_delay));
+}
+
+static void rk3228_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
+ RK3228_GMAC_PHY_INTF_SEL_RMII |
+ RK3228_GMAC_RMII_MODE);
+
+ /* set MAC to RMII mode */
+ regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, GRF_BIT(11));
+}
+
+static void rk3228_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ if (speed == 10)
+ regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
+ RK3228_GMAC_CLK_2_5M);
+ else if (speed == 100)
+ regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
+ RK3228_GMAC_CLK_25M);
+ else if (speed == 1000)
+ regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
+ RK3228_GMAC_CLK_125M);
+ else
+ dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
+}
+
+static void rk3228_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ if (speed == 10)
+ regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
+ RK3228_GMAC_RMII_CLK_2_5M |
+ RK3228_GMAC_SPEED_10M);
+ else if (speed == 100)
+ regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
+ RK3228_GMAC_RMII_CLK_25M |
+ RK3228_GMAC_SPEED_100M);
+ else
+ dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+}
+
+static const struct rk_gmac_ops rk3228_ops = {
+ .set_to_rgmii = rk3228_set_to_rgmii,
+ .set_to_rmii = rk3228_set_to_rmii,
+ .set_rgmii_speed = rk3228_set_rgmii_speed,
+ .set_rmii_speed = rk3228_set_rmii_speed,
+};
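
The CLK_*_DL_CFG macros rely on HIWORD_UPDATE (defined earlier in dwmac-rk.c, outside this hunk), which follows the same write-mask convention as GRF_BIT/GRF_CLR_BIT above: Rockchip GRF registers treat the upper 16 bits of a write as per-bit write enables for the lower 16. A sketch of the idiom, assuming that helper's usual definition:

/* assumed to match the HIWORD_UPDATE helper earlier in this file */
#define EXAMPLE_HIWORD_UPDATE(val, mask, shift) \
	(((val) << (shift)) | ((mask) << ((shift) + 16)))

/* writes 'delay' into bits [13:7] of MAC_CON0; other bits are left
 * untouched because their write-enable bits stay zero */
static void example_set_rx_delay(struct regmap *grf, u32 delay)
{
	regmap_write(grf, RK3228_GRF_MAC_CON0,
		     EXAMPLE_HIWORD_UPDATE(delay, 0x7F, 7));
}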
+
#define RK3288_GRF_SOC_CON1 0x0248
#define RK3288_GRF_SOC_CON3 0x0250
@@ -529,9 +646,8 @@ static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
return bsp_priv;
}
-static int rk_gmac_init(struct platform_device *pdev, void *priv)
+static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
{
- struct rk_priv_data *bsp_priv = priv;
int ret;
ret = phy_power_on(bsp_priv, true);
@@ -545,14 +661,50 @@ static int rk_gmac_init(struct platform_device *pdev, void *priv)
return 0;
}
-static void rk_gmac_exit(struct platform_device *pdev, void *priv)
+static void rk_gmac_powerdown(struct rk_priv_data *gmac)
{
- struct rk_priv_data *gmac = priv;
-
phy_power_on(gmac, false);
gmac_clk_enable(gmac, false);
}
+static int rk_gmac_init(struct platform_device *pdev, void *priv)
+{
+ struct rk_priv_data *bsp_priv = priv;
+
+ return rk_gmac_powerup(bsp_priv);
+}
+
+static void rk_gmac_exit(struct platform_device *pdev, void *priv)
+{
+ struct rk_priv_data *bsp_priv = priv;
+
+ rk_gmac_powerdown(bsp_priv);
+}
+
+static void rk_gmac_suspend(struct platform_device *pdev, void *priv)
+{
+ struct rk_priv_data *bsp_priv = priv;
+
+ /* Keep the PHY up if we use Wake-on-LAN. */
+ if (device_may_wakeup(&pdev->dev))
+ return;
+
+ rk_gmac_powerdown(bsp_priv);
+ bsp_priv->suspended = true;
+}
+
+static void rk_gmac_resume(struct platform_device *pdev, void *priv)
+{
+ struct rk_priv_data *bsp_priv = priv;
+
+ /* The PHY was left up for Wake-on-LAN. */
+ if (!bsp_priv->suspended)
+ return;
+
+ rk_gmac_powerup(bsp_priv);
+ bsp_priv->suspended = false;
+}
+
static void rk_fix_speed(void *priv, unsigned int speed)
{
struct rk_priv_data *bsp_priv = priv;
@@ -591,6 +743,8 @@ static int rk_gmac_probe(struct platform_device *pdev)
plat_dat->init = rk_gmac_init;
plat_dat->exit = rk_gmac_exit;
plat_dat->fix_mac_speed = rk_fix_speed;
+ plat_dat->suspend = rk_gmac_suspend;
+ plat_dat->resume = rk_gmac_resume;
plat_dat->bsp_priv = rk_gmac_setup(pdev, data);
if (IS_ERR(plat_dat->bsp_priv))
@@ -604,6 +758,7 @@ static int rk_gmac_probe(struct platform_device *pdev)
}
static const struct of_device_id rk_gmac_dwmac_match[] = {
+ { .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops },
{ .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops },
{ .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops },
{ }
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index afb90d129cb6..bec6963ac71e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -27,6 +27,11 @@
#include "stmmac.h"
#include "stmmac_platform.h"
+#include "altr_tse_pcs.h"
+
+#define SGMII_ADAPTER_CTRL_REG 0x00
+#define SGMII_ADAPTER_DISABLE 0x0001
+
#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0
#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1
#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII 0x2
@@ -49,37 +54,49 @@ struct socfpga_dwmac {
u32 reg_shift;
struct device *dev;
struct regmap *sys_mgr_base_addr;
+ struct reset_control *stmmac_rst;
void __iomem *splitter_base;
bool f2h_ptp_ref_clk;
+ struct tse_pcs pcs;
};
static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed)
{
struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv;
void __iomem *splitter_base = dwmac->splitter_base;
+ void __iomem *tse_pcs_base = dwmac->pcs.tse_pcs_base;
+ void __iomem *sgmii_adapter_base = dwmac->pcs.sgmii_adapter_base;
+ struct device *dev = dwmac->dev;
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct phy_device *phy_dev = ndev->phydev;
u32 val;
- if (!splitter_base)
- return;
-
- val = readl(splitter_base + EMAC_SPLITTER_CTRL_REG);
- val &= ~EMAC_SPLITTER_CTRL_SPEED_MASK;
-
- switch (speed) {
- case 1000:
- val |= EMAC_SPLITTER_CTRL_SPEED_1000;
- break;
- case 100:
- val |= EMAC_SPLITTER_CTRL_SPEED_100;
- break;
- case 10:
- val |= EMAC_SPLITTER_CTRL_SPEED_10;
- break;
- default:
- return;
+ if (tse_pcs_base && sgmii_adapter_base)
+ writew(SGMII_ADAPTER_DISABLE,
+ sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
+
+ if (splitter_base) {
+ val = readl(splitter_base + EMAC_SPLITTER_CTRL_REG);
+ val &= ~EMAC_SPLITTER_CTRL_SPEED_MASK;
+
+ switch (speed) {
+ case 1000:
+ val |= EMAC_SPLITTER_CTRL_SPEED_1000;
+ break;
+ case 100:
+ val |= EMAC_SPLITTER_CTRL_SPEED_100;
+ break;
+ case 10:
+ val |= EMAC_SPLITTER_CTRL_SPEED_10;
+ break;
+ default:
+ return;
+ }
+ writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG);
}
- writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG);
+ if (tse_pcs_base && sgmii_adapter_base)
+ tse_pcs_fix_mac_speed(&dwmac->pcs, phy_dev, speed);
}
static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *dev)
@@ -87,9 +104,12 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *
struct device_node *np = dev->of_node;
struct regmap *sys_mgr_base_addr;
u32 reg_offset, reg_shift;
- int ret;
- struct device_node *np_splitter;
+ int ret, index;
+ struct device_node *np_splitter = NULL;
+ struct device_node *np_sgmii_adapter = NULL;
struct resource res_splitter;
+ struct resource res_tse_pcs;
+ struct resource res_sgmii_adapter;
dwmac->interface = of_get_phy_mode(np);
@@ -115,7 +135,9 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *
np_splitter = of_parse_phandle(np, "altr,emac-splitter", 0);
if (np_splitter) {
- if (of_address_to_resource(np_splitter, 0, &res_splitter)) {
+ ret = of_address_to_resource(np_splitter, 0, &res_splitter);
+ of_node_put(np_splitter);
+ if (ret) {
dev_info(dev, "Missing emac splitter address\n");
return -EINVAL;
}
@@ -127,15 +149,89 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *
}
}
+ np_sgmii_adapter = of_parse_phandle(np,
+ "altr,gmii-to-sgmii-converter", 0);
+ if (np_sgmii_adapter) {
+ index = of_property_match_string(np_sgmii_adapter, "reg-names",
+ "hps_emac_interface_splitter_avalon_slave");
+
+ if (index >= 0) {
+ if (of_address_to_resource(np_sgmii_adapter, index,
+ &res_splitter)) {
+ dev_err(dev,
+ "%s: ERROR: missing emac splitter address\n",
+ __func__);
+ ret = -EINVAL;
+ goto err_node_put;
+ }
+
+ dwmac->splitter_base =
+ devm_ioremap_resource(dev, &res_splitter);
+
+ if (IS_ERR(dwmac->splitter_base)) {
+ ret = PTR_ERR(dwmac->splitter_base);
+ goto err_node_put;
+ }
+ }
+
+ index = of_property_match_string(np_sgmii_adapter, "reg-names",
+ "gmii_to_sgmii_adapter_avalon_slave");
+
+ if (index >= 0) {
+ if (of_address_to_resource(np_sgmii_adapter, index,
+ &res_sgmii_adapter)) {
+ dev_err(dev,
+ "%s: ERROR: failed mapping adapter\n",
+ __func__);
+ ret = -EINVAL;
+ goto err_node_put;
+ }
+
+ dwmac->pcs.sgmii_adapter_base =
+ devm_ioremap_resource(dev, &res_sgmii_adapter);
+
+ if (IS_ERR(dwmac->pcs.sgmii_adapter_base)) {
+ ret = PTR_ERR(dwmac->pcs.sgmii_adapter_base);
+ goto err_node_put;
+ }
+ }
+
+ index = of_property_match_string(np_sgmii_adapter, "reg-names",
+ "eth_tse_control_port");
+
+ if (index >= 0) {
+ if (of_address_to_resource(np_sgmii_adapter, index,
+ &res_tse_pcs)) {
+ dev_err(dev,
+ "%s: ERROR: failed mapping tse control port\n",
+ __func__);
+ ret = -EINVAL;
+ goto err_node_put;
+ }
+
+ dwmac->pcs.tse_pcs_base =
+ devm_ioremap_resource(dev, &res_tse_pcs);
+
+ if (IS_ERR(dwmac->pcs.tse_pcs_base)) {
+ ret = PTR_ERR(dwmac->pcs.tse_pcs_base);
+ goto err_node_put;
+ }
+ }
+ }
dwmac->reg_offset = reg_offset;
dwmac->reg_shift = reg_shift;
dwmac->sys_mgr_base_addr = sys_mgr_base_addr;
dwmac->dev = dev;
+ of_node_put(np_sgmii_adapter);
return 0;
+
+err_node_put:
+ of_node_put(np_sgmii_adapter);
+ return ret;
}
-static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
+static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac)
{
struct regmap *sys_mgr_base_addr = dwmac->sys_mgr_base_addr;
int phymode = dwmac->interface;
@@ -150,6 +246,7 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
break;
case PHY_INTERFACE_MODE_MII:
case PHY_INTERFACE_MODE_GMII:
+ case PHY_INTERFACE_MODE_SGMII:
val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
break;
default:
@@ -164,6 +261,10 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
if (dwmac->splitter_base)
val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
+ /* Assert reset to the enet controller before changing the phy mode */
+ if (dwmac->stmmac_rst)
+ reset_control_assert(dwmac->stmmac_rst);
+
regmap_read(sys_mgr_base_addr, reg_offset, &ctrl);
ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
ctrl |= val << reg_shift;
@@ -181,57 +282,19 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
regmap_write(sys_mgr_base_addr, reg_offset, ctrl);
- return 0;
-}
-
-static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
-{
- struct socfpga_dwmac *dwmac = priv;
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct stmmac_priv *stpriv = NULL;
- int ret = 0;
-
- if (!ndev)
- return -EINVAL;
-
- stpriv = netdev_priv(ndev);
- if (!stpriv)
- return -EINVAL;
-
- /* Assert reset to the enet controller before changing the phy mode */
- if (stpriv->stmmac_rst)
- reset_control_assert(stpriv->stmmac_rst);
-
- /* Setup the phy mode in the system manager registers according to
- * devicetree configuration
- */
- ret = socfpga_dwmac_setup(dwmac);
-
/* Deassert reset for the phy configuration to be sampled by
* the enet controller, and operation to start in requested mode
*/
- if (stpriv->stmmac_rst)
- reset_control_deassert(stpriv->stmmac_rst);
-
- /* Before the enet controller is suspended, the phy is suspended.
- * This causes the phy clock to be gated. The enet controller is
- * resumed before the phy, so the clock is still gated "off" when
- * the enet controller is resumed. This code makes sure the phy
- * is "resumed" before reinitializing the enet controller since
- * the enet controller depends on an active phy clock to complete
- * a DMA reset. A DMA reset will "time out" if executed
- * with no phy clock input on the Synopsys enet controller.
- * Verified through Synopsys Case #8000711656.
- *
- * Note that the phy clock is also gated when the phy is isolated.
- * Phy "suspend" and "isolate" controls are located in phy basic
- * control register 0, and can be modified by the phy driver
- * framework.
- */
- if (stpriv->phydev)
- phy_resume(stpriv->phydev);
+ if (dwmac->stmmac_rst)
+ reset_control_deassert(dwmac->stmmac_rst);
+ if (phymode == PHY_INTERFACE_MODE_SGMII) {
+ if (tse_pcs_init(dwmac->pcs.tse_pcs_base, &dwmac->pcs) != 0) {
+ dev_err(dwmac->dev, "Unable to initialize TSE PCS\n");
+ return -EINVAL;
+ }
+ }
- return ret;
+ return 0;
}
static int socfpga_dwmac_probe(struct platform_device *pdev)
@@ -260,23 +323,60 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
return ret;
}
- ret = socfpga_dwmac_setup(dwmac);
- if (ret) {
- dev_err(dev, "couldn't setup SoC glue (%d)\n", ret);
- return ret;
- }
-
plat_dat->bsp_priv = dwmac;
- plat_dat->init = socfpga_dwmac_init;
plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed;
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
- if (!ret)
- ret = socfpga_dwmac_init(pdev, dwmac);
+
+ if (!ret) {
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *stpriv = netdev_priv(ndev);
+
+ /* The socfpga driver needs to control the stmmac reset to
+ * set the phy mode. Create a copy of the core reset handle
+ * so it can be used by the driver later.
+ */
+ dwmac->stmmac_rst = stpriv->stmmac_rst;
+
+ ret = socfpga_dwmac_set_phy_mode(dwmac);
+ }
return ret;
}
+#ifdef CONFIG_PM_SLEEP
+static int socfpga_dwmac_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ socfpga_dwmac_set_phy_mode(priv->plat->bsp_priv);
+
+ /* Before the enet controller is suspended, the phy is suspended.
+ * This causes the phy clock to be gated. The enet controller is
+ * resumed before the phy, so the clock is still gated "off" when
+ * the enet controller is resumed. This code makes sure the phy
+ * is "resumed" before reinitializing the enet controller since
+ * the enet controller depends on an active phy clock to complete
+ * a DMA reset. A DMA reset will "time out" if executed
+ * with no phy clock input on the Synopsys enet controller.
+ * Verified through Synopsys Case #8000711656.
+ *
+ * Note that the phy clock is also gated when the phy is isolated.
+ * Phy "suspend" and "isolate" controls are located in phy basic
+ * control register 0, and can be modified by the phy driver
+ * framework.
+ */
+ if (priv->phydev)
+ phy_resume(priv->phydev);
+
+ return stmmac_resume(dev);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(socfpga_dwmac_pm_ops, stmmac_suspend,
+ socfpga_dwmac_resume);
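
SIMPLE_DEV_PM_OPS pairs the core's stmmac_suspend with the glue-specific resume; under CONFIG_PM_SLEEP it expands to roughly the following dev_pm_ops (abbreviated sketch, not part of the patch):

/* sketch: approximate expansion of the macro above */
static const struct dev_pm_ops example_pm_ops = {
	.suspend = stmmac_suspend,
	.resume = socfpga_dwmac_resume,
	/* freeze/thaw/poweroff/restore are mapped to the same pair */
};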
+
static const struct of_device_id socfpga_dwmac_match[] = {
{ .compatible = "altr,socfpga-stmmac" },
{ }
@@ -288,7 +388,7 @@ static struct platform_driver socfpga_dwmac_driver = {
.remove = stmmac_pltfr_remove,
.driver = {
.name = "socfpga-dwmac",
- .pm = &stmmac_pltfr_pm_ops,
+ .pm = &socfpga_dwmac_pm_ops,
.of_match_table = socfpga_dwmac_match,
},
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
index b0593a4268ee..ff3e5ab39bd0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -38,19 +38,26 @@
#define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */
#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */
-enum dwmac1000_irq_status {
- lpiis_irq = 0x400,
- time_stamp_irq = 0x0200,
- mmc_rx_csum_offload_irq = 0x0080,
- mmc_tx_irq = 0x0040,
- mmc_rx_irq = 0x0020,
- mmc_irq = 0x0010,
- pmt_irq = 0x0008,
- pcs_ane_irq = 0x0004,
- pcs_link_irq = 0x0002,
- rgmii_irq = 0x0001,
-};
-#define GMAC_INT_MASK 0x0000003c /* interrupt mask register */
+#define GMAC_INT_STATUS_PMT BIT(3)
+#define GMAC_INT_STATUS_MMCIS BIT(4)
+#define GMAC_INT_STATUS_MMCRIS BIT(5)
+#define GMAC_INT_STATUS_MMCTIS BIT(6)
+#define GMAC_INT_STATUS_MMCCSUM BIT(7)
+#define GMAC_INT_STATUS_TSTAMP BIT(9)
+#define GMAC_INT_STATUS_LPIIS BIT(10)
+
+/* interrupt mask register */
+#define GMAC_INT_MASK 0x0000003c
+#define GMAC_INT_DISABLE_RGMII BIT(0)
+#define GMAC_INT_DISABLE_PCSLINK BIT(1)
+#define GMAC_INT_DISABLE_PCSAN BIT(2)
+#define GMAC_INT_DISABLE_PMT BIT(3)
+#define GMAC_INT_DISABLE_TIMESTAMP BIT(9)
+#define GMAC_INT_DISABLE_PCS (GMAC_INT_DISABLE_RGMII | \
+ GMAC_INT_DISABLE_PCSLINK | \
+ GMAC_INT_DISABLE_PCSAN)
+#define GMAC_INT_DEFAULT_MASK (GMAC_INT_DISABLE_TIMESTAMP | \
+ GMAC_INT_DISABLE_PCS)
/* PMT Control and Status */
#define GMAC_PMT 0x0000002c
@@ -90,42 +97,23 @@ enum power_event {
(reg * 8))
#define GMAC_MAX_PERFECT_ADDRESSES 1
-/* PCS registers (AN/TBI/SGMII/RGMII) offset */
-#define GMAC_AN_CTRL 0x000000c0 /* AN control */
-#define GMAC_AN_STATUS 0x000000c4 /* AN status */
-#define GMAC_ANE_ADV 0x000000c8 /* Auto-Neg. Advertisement */
-#define GMAC_ANE_LPA 0x000000cc /* Auto-Neg. link partener ability */
-#define GMAC_ANE_EXP 0x000000d0 /* ANE expansion */
-#define GMAC_TBI 0x000000d4 /* TBI extend status */
-#define GMAC_S_R_GMII 0x000000d8 /* SGMII RGMII status */
-
-/* AN Configuration defines */
-#define GMAC_AN_CTRL_RAN 0x00000200 /* Restart Auto-Negotiation */
-#define GMAC_AN_CTRL_ANE 0x00001000 /* Auto-Negotiation Enable */
-#define GMAC_AN_CTRL_ELE 0x00004000 /* External Loopback Enable */
-#define GMAC_AN_CTRL_ECD 0x00010000 /* Enable Comma Detect */
-#define GMAC_AN_CTRL_LR 0x00020000 /* Lock to Reference */
-#define GMAC_AN_CTRL_SGMRAL 0x00040000 /* SGMII RAL Control */
-
-/* AN Status defines */
-#define GMAC_AN_STATUS_LS 0x00000004 /* Link Status 0:down 1:up */
-#define GMAC_AN_STATUS_ANA 0x00000008 /* Auto-Negotiation Ability */
-#define GMAC_AN_STATUS_ANC 0x00000020 /* Auto-Negotiation Complete */
-#define GMAC_AN_STATUS_ES 0x00000100 /* Extended Status */
-
-/* Register 54 (SGMII/RGMII status register) */
-#define GMAC_S_R_GMII_LINK 0x8
-#define GMAC_S_R_GMII_SPEED 0x5
-#define GMAC_S_R_GMII_SPEED_SHIFT 0x1
-#define GMAC_S_R_GMII_MODE 0x1
-#define GMAC_S_R_GMII_SPEED_125 2
-#define GMAC_S_R_GMII_SPEED_25 1
-
-/* Common ADV and LPA defines */
-#define GMAC_ANE_FD (1 << 5)
-#define GMAC_ANE_HD (1 << 6)
-#define GMAC_ANE_PSE (3 << 7)
-#define GMAC_ANE_PSE_SHIFT 7
+#define GMAC_PCS_BASE 0x000000c0 /* PCS register base */
+#define GMAC_RGSMIIIS 0x000000d8 /* RGMII/SMII status */
+
+/* SGMII/RGMII status register */
+#define GMAC_RGSMIIIS_LNKMODE BIT(0)
+#define GMAC_RGSMIIIS_SPEED GENMASK(2, 1)
+#define GMAC_RGSMIIIS_SPEED_SHIFT 1
+#define GMAC_RGSMIIIS_LNKSTS BIT(3)
+#define GMAC_RGSMIIIS_JABTO BIT(4)
+#define GMAC_RGSMIIIS_FALSECARDET BIT(5)
+#define GMAC_RGSMIIIS_SMIDRXS BIT(16)
+/* LNKMOD */
+#define GMAC_RGSMIIIS_LNKMOD_MASK 0x1
+/* LNKSPEED */
+#define GMAC_RGSMIIIS_SPEED_125 0x2
+#define GMAC_RGSMIIIS_SPEED_25 0x1
+#define GMAC_RGSMIIIS_SPEED_2_5 0x0
/* GMAC Configuration defines */
#define GMAC_CONTROL_2K 0x08000000 /* IEEE 802.3as 2K packets */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index c2941172f6d1..885a5e64519d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -30,22 +30,48 @@
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <asm/io.h>
+#include "stmmac_pcs.h"
#include "dwmac1000.h"
static void dwmac1000_core_init(struct mac_device_info *hw, int mtu)
{
void __iomem *ioaddr = hw->pcsr;
u32 value = readl(ioaddr + GMAC_CONTROL);
+
+ /* Configure GMAC core */
value |= GMAC_CORE_INIT;
+
if (mtu > 1500)
value |= GMAC_CONTROL_2K;
if (mtu > 2000)
value |= GMAC_CONTROL_JE;
+ if (hw->ps) {
+ value |= GMAC_CONTROL_TE;
+
+ if (hw->ps == SPEED_1000) {
+ value &= ~GMAC_CONTROL_PS;
+ } else {
+ value |= GMAC_CONTROL_PS;
+
+ if (hw->ps == SPEED_10)
+ value &= ~GMAC_CONTROL_FES;
+ else
+ value |= GMAC_CONTROL_FES;
+ }
+ }
+
writel(value, ioaddr + GMAC_CONTROL);
/* Mask GMAC interrupts */
- writel(0x207, ioaddr + GMAC_INT_MASK);
+ value = GMAC_INT_DEFAULT_MASK;
+
+ if (hw->pmt)
+ value &= ~GMAC_INT_DISABLE_PMT;
+ if (hw->pcs)
+ value &= ~GMAC_INT_DISABLE_PCS;
+
+ writel(value, ioaddr + GMAC_INT_MASK);
#ifdef STMMAC_VLAN_TAG_USED
/* Tag detection without filtering */
@@ -235,12 +261,45 @@ static void dwmac1000_pmt(struct mac_device_info *hw, unsigned long mode)
}
if (mode & WAKE_UCAST) {
pr_debug("GMAC: WOL on global unicast\n");
- pmt |= global_unicast;
+ pmt |= power_down | global_unicast | wake_up_frame_en;
}
writel(pmt, ioaddr + GMAC_PMT);
}
+/* RGMII or SMII interface */
+static void dwmac1000_rgsmii(void __iomem *ioaddr, struct stmmac_extra_stats *x)
+{
+ u32 status;
+
+ status = readl(ioaddr + GMAC_RGSMIIIS);
+ x->irq_rgmii_n++;
+
+ /* Check the link status */
+ if (status & GMAC_RGSMIIIS_LNKSTS) {
+ int speed_value;
+
+ x->pcs_link = 1;
+
+ speed_value = ((status & GMAC_RGSMIIIS_SPEED) >>
+ GMAC_RGSMIIIS_SPEED_SHIFT);
+ if (speed_value == GMAC_RGSMIIIS_SPEED_125)
+ x->pcs_speed = SPEED_1000;
+ else if (speed_value == GMAC_RGSMIIIS_SPEED_25)
+ x->pcs_speed = SPEED_100;
+ else
+ x->pcs_speed = SPEED_10;
+
+ x->pcs_duplex = (status & GMAC_RGSMIIIS_LNKMOD_MASK);
+
+ pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed,
+ x->pcs_duplex ? "Full" : "Half");
+ } else {
+ x->pcs_link = 0;
+ pr_info("Link is Down\n");
+ }
+}
+
static int dwmac1000_irq_status(struct mac_device_info *hw,
struct stmmac_extra_stats *x)
{
@@ -249,19 +308,20 @@ static int dwmac1000_irq_status(struct mac_device_info *hw,
int ret = 0;
/* Not used events (e.g. MMC interrupts) are not handled. */
- if ((intr_status & mmc_tx_irq))
+ if ((intr_status & GMAC_INT_STATUS_MMCTIS))
x->mmc_tx_irq_n++;
- if (unlikely(intr_status & mmc_rx_irq))
+ if (unlikely(intr_status & GMAC_INT_STATUS_MMCRIS))
x->mmc_rx_irq_n++;
- if (unlikely(intr_status & mmc_rx_csum_offload_irq))
+ if (unlikely(intr_status & GMAC_INT_STATUS_MMCCSUM))
x->mmc_rx_csum_offload_irq_n++;
- if (unlikely(intr_status & pmt_irq)) {
+ if (unlikely(intr_status & GMAC_INT_DISABLE_PMT)) {
/* clear the PMT bits 5 and 6 by reading the PMT status reg */
readl(ioaddr + GMAC_PMT);
x->irq_receive_pmt_irq_n++;
}
- /* MAC trx/rx EEE LPI entry/exit interrupts */
- if (intr_status & lpiis_irq) {
+
+ /* MAC tx/rx EEE LPI entry/exit interrupts */
+ if (intr_status & GMAC_INT_STATUS_LPIIS) {
/* Clean LPI interrupt by reading the Reg 12 */
ret = readl(ioaddr + LPI_CTRL_STATUS);
@@ -275,36 +335,10 @@ static int dwmac1000_irq_status(struct mac_device_info *hw,
x->irq_rx_path_exit_lpi_mode_n++;
}
- if ((intr_status & pcs_ane_irq) || (intr_status & pcs_link_irq)) {
- readl(ioaddr + GMAC_AN_STATUS);
- x->irq_pcs_ane_n++;
- }
- if (intr_status & rgmii_irq) {
- u32 status = readl(ioaddr + GMAC_S_R_GMII);
- x->irq_rgmii_n++;
-
- /* Save and dump the link status. */
- if (status & GMAC_S_R_GMII_LINK) {
- int speed_value = (status & GMAC_S_R_GMII_SPEED) >>
- GMAC_S_R_GMII_SPEED_SHIFT;
- x->pcs_duplex = (status & GMAC_S_R_GMII_MODE);
-
- if (speed_value == GMAC_S_R_GMII_SPEED_125)
- x->pcs_speed = SPEED_1000;
- else if (speed_value == GMAC_S_R_GMII_SPEED_25)
- x->pcs_speed = SPEED_100;
- else
- x->pcs_speed = SPEED_10;
+ dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
- x->pcs_link = 1;
- pr_debug("%s: Link is Up - %d/%s\n", __func__,
- (int)x->pcs_speed,
- x->pcs_duplex ? "Full" : "Half");
- } else {
- x->pcs_link = 0;
- pr_debug("%s: Link is Down\n", __func__);
- }
- }
+ if (intr_status & PCS_RGSMIIIS_IRQ)
+ dwmac1000_rgsmii(ioaddr, x);
return ret;
}
@@ -363,38 +397,20 @@ static void dwmac1000_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
writel(value, ioaddr + LPI_TIMER_CTRL);
}
-static void dwmac1000_ctrl_ane(struct mac_device_info *hw, bool restart)
+static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
+ bool loopback)
{
- void __iomem *ioaddr = hw->pcsr;
- /* auto negotiation enable and External Loopback enable */
- u32 value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE;
-
- if (restart)
- value |= GMAC_AN_CTRL_RAN;
-
- writel(value, ioaddr + GMAC_AN_CTRL);
+ dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
}
-static void dwmac1000_get_adv(struct mac_device_info *hw, struct rgmii_adv *adv)
+static void dwmac1000_rane(void __iomem *ioaddr, bool restart)
{
- void __iomem *ioaddr = hw->pcsr;
- u32 value = readl(ioaddr + GMAC_ANE_ADV);
-
- if (value & GMAC_ANE_FD)
- adv->duplex = DUPLEX_FULL;
- if (value & GMAC_ANE_HD)
- adv->duplex |= DUPLEX_HALF;
-
- adv->pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT;
-
- value = readl(ioaddr + GMAC_ANE_LPA);
-
- if (value & GMAC_ANE_FD)
- adv->lp_duplex = DUPLEX_FULL;
- if (value & GMAC_ANE_HD)
- adv->lp_duplex = DUPLEX_HALF;
+ dwmac_rane(ioaddr, GMAC_PCS_BASE, restart);
+}
- adv->lp_pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT;
+static void dwmac1000_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
+{
+ dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
}
static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x)
@@ -485,13 +501,15 @@ static const struct stmmac_ops dwmac1000_ops = {
.reset_eee_mode = dwmac1000_reset_eee_mode,
.set_eee_timer = dwmac1000_set_eee_timer,
.set_eee_pls = dwmac1000_set_eee_pls,
- .ctrl_ane = dwmac1000_ctrl_ane,
- .get_adv = dwmac1000_get_adv,
.debug = dwmac1000_debug,
+ .pcs_ctrl_ane = dwmac1000_ctrl_ane,
+ .pcs_rane = dwmac1000_rane,
+ .pcs_get_adv_lp = dwmac1000_get_adv_lp,
};
struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
- int perfect_uc_entries)
+ int perfect_uc_entries,
+ int *synopsys_id)
{
struct mac_device_info *mac;
u32 hwid = readl(ioaddr + GMAC_VERSION);
@@ -516,7 +534,9 @@ struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
mac->link.speed = GMAC_CONTROL_FES;
mac->mii.addr = GMAC_MII_ADDR;
mac->mii.data = GMAC_MII_DATA;
- mac->synopsys_uid = hwid;
+
+ /* Get and dump the chip ID */
+ *synopsys_id = stmmac_get_synopsys_id(hwid);
return mac;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
index da32d6037e3e..990746955216 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -215,9 +215,40 @@ static void dwmac1000_dump_dma_regs(void __iomem *ioaddr)
}
}
-static unsigned int dwmac1000_get_hw_feature(void __iomem *ioaddr)
+static void dwmac1000_get_hw_feature(void __iomem *ioaddr,
+ struct dma_features *dma_cap)
{
- return readl(ioaddr + DMA_HW_FEATURE);
+ u32 hw_cap = readl(ioaddr + DMA_HW_FEATURE);
+
+ dma_cap->mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL);
+ dma_cap->mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
+ dma_cap->half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2;
+ dma_cap->hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4;
+ dma_cap->multi_addr = (hw_cap & DMA_HW_FEAT_ADDMAC) >> 5;
+ dma_cap->pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6;
+ dma_cap->sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8;
+ dma_cap->pmt_remote_wake_up = (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9;
+ dma_cap->pmt_magic_frame = (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10;
+ /* MMC */
+ dma_cap->rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11;
+ /* IEEE 1588-2002 */
+ dma_cap->time_stamp =
+ (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12;
+ /* IEEE 1588-2008 */
+ dma_cap->atime_stamp = (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13;
+ /* 802.3az - Energy-Efficient Ethernet (EEE) */
+ dma_cap->eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14;
+ dma_cap->av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15;
+ /* TX and RX csum */
+ dma_cap->tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16;
+ dma_cap->rx_coe_type1 = (hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17;
+ dma_cap->rx_coe_type2 = (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18;
+ dma_cap->rxfifo_over_2048 = (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19;
+ /* TX and RX number of channels */
+ dma_cap->number_rx_channel = (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20;
+ dma_cap->number_tx_channel = (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22;
+ /* Alternate (enhanced) DESC mode */
+ dma_cap->enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
}
static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index f8dd773f246c..6418b2e07619 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -173,7 +173,7 @@ static const struct stmmac_ops dwmac100_ops = {
.get_umac_addr = dwmac100_get_umac_addr,
};
-struct mac_device_info *dwmac100_setup(void __iomem *ioaddr)
+struct mac_device_info *dwmac100_setup(void __iomem *ioaddr, int *synopsys_id)
{
struct mac_device_info *mac;
@@ -192,7 +192,8 @@ struct mac_device_info *dwmac100_setup(void __iomem *ioaddr)
mac->link.speed = 0;
mac->mii.addr = MAC_MII_ADDR;
mac->mii.data = MAC_MII_DATA;
- mac->synopsys_uid = 0;
+ /* The Synopsys ID is not available on old chips */
+ *synopsys_id = 0;
return mac;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
new file mode 100644
index 000000000000..6f4f5ce25114
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -0,0 +1,239 @@
+/*
+ * DWMAC4 Header file.
+ *
+ * Copyright (C) 2015 STMicroelectronics Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@st.com>
+ */
+
+#ifndef __DWMAC4_H__
+#define __DWMAC4_H__
+
+#include "common.h"
+
+/* MAC registers */
+#define GMAC_CONFIG 0x00000000
+#define GMAC_PACKET_FILTER 0x00000008
+#define GMAC_HASH_TAB_0_31 0x00000010
+#define GMAC_HASH_TAB_32_63 0x00000014
+#define GMAC_RX_FLOW_CTRL 0x00000090
+#define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4)
+#define GMAC_INT_STATUS 0x000000b0
+#define GMAC_INT_EN 0x000000b4
+#define GMAC_PCS_BASE 0x000000e0
+#define GMAC_PHYIF_CONTROL_STATUS 0x000000f8
+#define GMAC_PMT 0x000000c0
+#define GMAC_VERSION 0x00000110
+#define GMAC_DEBUG 0x00000114
+#define GMAC_HW_FEATURE0 0x0000011c
+#define GMAC_HW_FEATURE1 0x00000120
+#define GMAC_HW_FEATURE2 0x00000124
+#define GMAC_MDIO_ADDR 0x00000200
+#define GMAC_MDIO_DATA 0x00000204
+#define GMAC_ADDR_HIGH(reg) (0x300 + reg * 8)
+#define GMAC_ADDR_LOW(reg) (0x304 + reg * 8)
+
+/* MAC Packet Filtering */
+#define GMAC_PACKET_FILTER_PR BIT(0)
+#define GMAC_PACKET_FILTER_HMC BIT(2)
+#define GMAC_PACKET_FILTER_PM BIT(4)
+
+#define GMAC_MAX_PERFECT_ADDRESSES 128
+
+/* MAC Flow Control RX */
+#define GMAC_RX_FLOW_CTRL_RFE BIT(0)
+
+/* MAC Flow Control TX */
+#define GMAC_TX_FLOW_CTRL_TFE BIT(1)
+#define GMAC_TX_FLOW_CTRL_PT_SHIFT 16
+
+/* MAC Interrupt bitmap*/
+#define GMAC_INT_RGSMIIS BIT(0)
+#define GMAC_INT_PCS_LINK BIT(1)
+#define GMAC_INT_PCS_ANE BIT(2)
+#define GMAC_INT_PCS_PHYIS BIT(3)
+#define GMAC_INT_PMT_EN BIT(4)
+#define GMAC_INT_LPI_EN BIT(5)
+
+#define GMAC_PCS_IRQ_DEFAULT (GMAC_INT_RGSMIIS | GMAC_INT_PCS_LINK | \
+ GMAC_INT_PCS_ANE)
+
+#define GMAC_INT_DEFAULT_MASK GMAC_INT_PMT_EN
+
+enum dwmac4_irq_status {
+ time_stamp_irq = 0x00001000,
+ mmc_rx_csum_offload_irq = 0x00000800,
+ mmc_tx_irq = 0x00000400,
+ mmc_rx_irq = 0x00000200,
+ mmc_irq = 0x00000100,
+ pmt_irq = 0x00000010,
+};
+
+/* MAC PMT bitmap */
+enum power_event {
+ pointer_reset = 0x80000000,
+ global_unicast = 0x00000200,
+ wake_up_rx_frame = 0x00000040,
+ magic_frame = 0x00000020,
+ wake_up_frame_en = 0x00000004,
+ magic_pkt_en = 0x00000002,
+ power_down = 0x00000001,
+};
+
+/* MAC Debug bitmap */
+#define GMAC_DEBUG_TFCSTS_MASK GENMASK(18, 17)
+#define GMAC_DEBUG_TFCSTS_SHIFT 17
+#define GMAC_DEBUG_TFCSTS_IDLE 0
+#define GMAC_DEBUG_TFCSTS_WAIT 1
+#define GMAC_DEBUG_TFCSTS_GEN_PAUSE 2
+#define GMAC_DEBUG_TFCSTS_XFER 3
+#define GMAC_DEBUG_TPESTS BIT(16)
+#define GMAC_DEBUG_RFCFCSTS_MASK GENMASK(2, 1)
+#define GMAC_DEBUG_RFCFCSTS_SHIFT 1
+#define GMAC_DEBUG_RPESTS BIT(0)
+
+/* MAC config */
+#define GMAC_CONFIG_IPC BIT(27)
+#define GMAC_CONFIG_2K BIT(22)
+#define GMAC_CONFIG_ACS BIT(20)
+#define GMAC_CONFIG_BE BIT(18)
+#define GMAC_CONFIG_JD BIT(17)
+#define GMAC_CONFIG_JE BIT(16)
+#define GMAC_CONFIG_PS BIT(15)
+#define GMAC_CONFIG_FES BIT(14)
+#define GMAC_CONFIG_DM BIT(13)
+#define GMAC_CONFIG_DCRS BIT(9)
+#define GMAC_CONFIG_TE BIT(1)
+#define GMAC_CONFIG_RE BIT(0)
+
+/* MAC HW features0 bitmap */
+#define GMAC_HW_FEAT_ADDMAC BIT(18)
+#define GMAC_HW_FEAT_RXCOESEL BIT(16)
+#define GMAC_HW_FEAT_TXCOSEL BIT(14)
+#define GMAC_HW_FEAT_EEESEL BIT(13)
+#define GMAC_HW_FEAT_TSSEL BIT(12)
+#define GMAC_HW_FEAT_MMCSEL BIT(8)
+#define GMAC_HW_FEAT_MGKSEL BIT(7)
+#define GMAC_HW_FEAT_RWKSEL BIT(6)
+#define GMAC_HW_FEAT_SMASEL BIT(5)
+#define GMAC_HW_FEAT_VLHASH BIT(4)
+#define GMAC_HW_FEAT_PCSSEL BIT(3)
+#define GMAC_HW_FEAT_HDSEL BIT(2)
+#define GMAC_HW_FEAT_GMIISEL BIT(1)
+#define GMAC_HW_FEAT_MIISEL BIT(0)
+
+/* MAC HW features1 bitmap */
+#define GMAC_HW_FEAT_AVSEL BIT(20)
+#define GMAC_HW_TSOEN BIT(18)
+
+/* MAC HW features2 bitmap */
+#define GMAC_HW_FEAT_TXCHCNT GENMASK(21, 18)
+#define GMAC_HW_FEAT_RXCHCNT GENMASK(15, 12)
+
+/* MAC HW ADDR regs */
+#define GMAC_HI_DCS GENMASK(18, 16)
+#define GMAC_HI_DCS_SHIFT 16
+#define GMAC_HI_REG_AE BIT(31)
+
+/* MTL registers */
+#define MTL_INT_STATUS 0x00000c20
+#define MTL_INT_Q0 BIT(0)
+
+#define MTL_CHAN_BASE_ADDR 0x00000d00
+#define MTL_CHAN_BASE_OFFSET 0x40
+#define MTL_CHANX_BASE_ADDR(x) (MTL_CHAN_BASE_ADDR + \
+ (x * MTL_CHAN_BASE_OFFSET))
+
+#define MTL_CHAN_TX_OP_MODE(x) MTL_CHANX_BASE_ADDR(x)
+#define MTL_CHAN_TX_DEBUG(x) (MTL_CHANX_BASE_ADDR(x) + 0x8)
+#define MTL_CHAN_INT_CTRL(x) (MTL_CHANX_BASE_ADDR(x) + 0x2c)
+#define MTL_CHAN_RX_OP_MODE(x) (MTL_CHANX_BASE_ADDR(x) + 0x30)
+#define MTL_CHAN_RX_DEBUG(x) (MTL_CHANX_BASE_ADDR(x) + 0x38)
+
+#define MTL_OP_MODE_RSF BIT(5)
+#define MTL_OP_MODE_TSF BIT(1)
+
+#define MTL_OP_MODE_TTC_MASK 0x70
+#define MTL_OP_MODE_TTC_SHIFT 4
+
+#define MTL_OP_MODE_TTC_32 0
+#define MTL_OP_MODE_TTC_64 (1 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_TTC_96 (2 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_TTC_128 (3 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_TTC_192 (4 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_TTC_256 (5 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_TTC_384 (6 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_TTC_512 (7 << MTL_OP_MODE_TTC_SHIFT)
+
+#define MTL_OP_MODE_RTC_MASK 0x18
+#define MTL_OP_MODE_RTC_SHIFT 3
+
+#define MTL_OP_MODE_RTC_32 (1 << MTL_OP_MODE_RTC_SHIFT)
+#define MTL_OP_MODE_RTC_64 0
+#define MTL_OP_MODE_RTC_96 (2 << MTL_OP_MODE_RTC_SHIFT)
+#define MTL_OP_MODE_RTC_128 (3 << MTL_OP_MODE_RTC_SHIFT)
+
+/* MTL debug */
+#define MTL_DEBUG_TXSTSFSTS BIT(5)
+#define MTL_DEBUG_TXFSTS BIT(4)
+#define MTL_DEBUG_TWCSTS BIT(3)
+
+/* MTL debug: Tx FIFO Read Controller Status */
+#define MTL_DEBUG_TRCSTS_MASK GENMASK(2, 1)
+#define MTL_DEBUG_TRCSTS_SHIFT 1
+#define MTL_DEBUG_TRCSTS_IDLE 0
+#define MTL_DEBUG_TRCSTS_READ 1
+#define MTL_DEBUG_TRCSTS_TXW 2
+#define MTL_DEBUG_TRCSTS_WRITE 3
+#define MTL_DEBUG_TXPAUSED BIT(0)
+
+/* MTL debug: Rx FIFO fill-level status */
+#define MTL_DEBUG_RXFSTS_MASK GENMASK(5, 4)
+#define MTL_DEBUG_RXFSTS_SHIFT 4
+#define MTL_DEBUG_RXFSTS_EMPTY 0
+#define MTL_DEBUG_RXFSTS_BT 1
+#define MTL_DEBUG_RXFSTS_AT 2
+#define MTL_DEBUG_RXFSTS_FULL 3
+#define MTL_DEBUG_RRCSTS_MASK GENMASK(2, 1)
+#define MTL_DEBUG_RRCSTS_SHIFT 1
+#define MTL_DEBUG_RRCSTS_IDLE 0
+#define MTL_DEBUG_RRCSTS_RDATA 1
+#define MTL_DEBUG_RRCSTS_RSTAT 2
+#define MTL_DEBUG_RRCSTS_FLUSH 3
+#define MTL_DEBUG_RWCSTS BIT(0)
+
+/* MTL interrupt */
+#define MTL_RX_OVERFLOW_INT_EN BIT(24)
+#define MTL_RX_OVERFLOW_INT BIT(16)
+
+/* Default operating mode of the MAC */
+#define GMAC_CORE_INIT (GMAC_CONFIG_JD | GMAC_CONFIG_PS | GMAC_CONFIG_ACS | \
+ GMAC_CONFIG_BE | GMAC_CONFIG_DCRS)
+
+/* To dump the core regs excluding the Address Registers */
+#define GMAC_REG_NUM 132
+
+/* SGMII/RGMII status register */
+#define GMAC_PHYIF_CTRLSTATUS_TC BIT(0)
+#define GMAC_PHYIF_CTRLSTATUS_LUD BIT(1)
+#define GMAC_PHYIF_CTRLSTATUS_SMIDRXS BIT(4)
+#define GMAC_PHYIF_CTRLSTATUS_LNKMOD BIT(16)
+#define GMAC_PHYIF_CTRLSTATUS_SPEED GENMASK(18, 17)
+#define GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT 17
+#define GMAC_PHYIF_CTRLSTATUS_LNKSTS BIT(19)
+#define GMAC_PHYIF_CTRLSTATUS_JABTO BIT(20)
+#define GMAC_PHYIF_CTRLSTATUS_FALSECARDET BIT(21)
+/* LNKMOD */
+#define GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK 0x1
+/* LNKSPEED */
+#define GMAC_PHYIF_CTRLSTATUS_SPEED_125 0x2
+#define GMAC_PHYIF_CTRLSTATUS_SPEED_25 0x1
+#define GMAC_PHYIF_CTRLSTATUS_SPEED_2_5 0x0
+
+extern const struct stmmac_dma_ops dwmac4_dma_ops;
+extern const struct stmmac_dma_ops dwmac410_dma_ops;
+#endif /* __DWMAC4_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
new file mode 100644
index 000000000000..51019b794be5
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -0,0 +1,443 @@
+/*
+ * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
+ * DWC Ether MAC version 4.00 has been used for developing this code.
+ *
+ * This only implements the mac core functions for this chip.
+ *
+ * Copyright (C) 2015 STMicroelectronics Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@st.com>
+ */
+
+#include <linux/crc32.h>
+#include <linux/slab.h>
+#include <linux/ethtool.h>
+#include <linux/io.h>
+#include "stmmac_pcs.h"
+#include "dwmac4.h"
+
+static void dwmac4_core_init(struct mac_device_info *hw, int mtu)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value = readl(ioaddr + GMAC_CONFIG);
+
+ value |= GMAC_CORE_INIT;
+
+ if (mtu > 1500)
+ value |= GMAC_CONFIG_2K;
+ if (mtu > 2000)
+ value |= GMAC_CONFIG_JE;
+
+ if (hw->ps) {
+ value |= GMAC_CONFIG_TE;
+
+ if (hw->ps == SPEED_1000) {
+ value &= ~GMAC_CONFIG_PS;
+ } else {
+ value |= GMAC_CONFIG_PS;
+
+ if (hw->ps == SPEED_10)
+ value &= ~GMAC_CONFIG_FES;
+ else
+ value |= GMAC_CONFIG_FES;
+ }
+ }
+
+ writel(value, ioaddr + GMAC_CONFIG);
+
+ /* Mask GMAC interrupts */
+ value = GMAC_INT_DEFAULT_MASK;
+ if (hw->pmt)
+ value |= GMAC_INT_PMT_EN;
+ if (hw->pcs)
+ value |= GMAC_PCS_IRQ_DEFAULT;
+
+ writel(value, ioaddr + GMAC_INT_EN);
+}
+
+static void dwmac4_dump_regs(struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ int i;
+
+ pr_debug("\tDWMAC4 regs (base addr = 0x%p)\n", ioaddr);
+
+ for (i = 0; i < GMAC_REG_NUM; i++) {
+ int offset = i * 4;
+
+ pr_debug("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
+ offset, readl(ioaddr + offset));
+ }
+}
+
+static int dwmac4_rx_ipc_enable(struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value = readl(ioaddr + GMAC_CONFIG);
+
+ if (hw->rx_csum)
+ value |= GMAC_CONFIG_IPC;
+ else
+ value &= ~GMAC_CONFIG_IPC;
+
+ writel(value, ioaddr + GMAC_CONFIG);
+
+ value = readl(ioaddr + GMAC_CONFIG);
+
+ return !!(value & GMAC_CONFIG_IPC);
+}
+
+static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ unsigned int pmt = 0;
+
+ if (mode & WAKE_MAGIC) {
+ pr_debug("GMAC: WOL Magic frame\n");
+ pmt |= power_down | magic_pkt_en;
+ }
+ if (mode & WAKE_UCAST) {
+ pr_debug("GMAC: WOL on global unicast\n");
+ pmt |= power_down | global_unicast | wake_up_frame_en;
+ }
+
+ writel(pmt, ioaddr + GMAC_PMT);
+}
+
+static void dwmac4_set_umac_addr(struct mac_device_info *hw,
+ unsigned char *addr, unsigned int reg_n)
+{
+ void __iomem *ioaddr = hw->pcsr;
+
+ stmmac_dwmac4_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
+ GMAC_ADDR_LOW(reg_n));
+}
+
+static void dwmac4_get_umac_addr(struct mac_device_info *hw,
+ unsigned char *addr, unsigned int reg_n)
+{
+ void __iomem *ioaddr = hw->pcsr;
+
+ stmmac_dwmac4_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
+ GMAC_ADDR_LOW(reg_n));
+}
+
+static void dwmac4_set_filter(struct mac_device_info *hw,
+ struct net_device *dev)
+{
+ void __iomem *ioaddr = (void __iomem *)dev->base_addr;
+ unsigned int value = 0;
+
+ if (dev->flags & IFF_PROMISC) {
+ value = GMAC_PACKET_FILTER_PR;
+ } else if ((dev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(dev) > HASH_TABLE_SIZE)) {
+ /* Pass all multi */
+ value = GMAC_PACKET_FILTER_PM;
+ /* Set all 64 bits of the hash table. To be updated if a
+ * larger hash table is used
+ */
+ writel(0xffffffff, ioaddr + GMAC_HASH_TAB_0_31);
+ writel(0xffffffff, ioaddr + GMAC_HASH_TAB_32_63);
+ } else if (!netdev_mc_empty(dev)) {
+ u32 mc_filter[2];
+ struct netdev_hw_addr *ha;
+
+ /* Hash filter for multicast */
+ value = GMAC_PACKET_FILTER_HMC;
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ netdev_for_each_mc_addr(ha, dev) {
+ /* The upper 6 bits of the calculated CRC are used to
+ * index the content of the Hash Table Reg 0 and 1.
+ */
+ int bit_nr =
+ (bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26);
+ /* The most significant bit determines the register
+ * to use while the other 5 bits determines the bit
+ * within the selected register
+ */
+ mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1F));
+ }
+ writel(mc_filter[0], ioaddr + GMAC_HASH_TAB_0_31);
+ writel(mc_filter[1], ioaddr + GMAC_HASH_TAB_32_63);
+ }
+
+ /* Handle multiple unicast addresses */
+ if (netdev_uc_count(dev) > GMAC_MAX_PERFECT_ADDRESSES) {
+ /* Switch to promiscuous mode if more than 128 addrs
+ * are required
+ */
+ value |= GMAC_PACKET_FILTER_PR;
+ } else if (!netdev_uc_empty(dev)) {
+ int reg = 1;
+ struct netdev_hw_addr *ha;
+
+ netdev_for_each_uc_addr(ha, dev) {
+ dwmac4_set_umac_addr(hw, ha->addr, reg);
+ reg++;
+ }
+ }
+
+ writel(value, ioaddr + GMAC_PACKET_FILTER);
+}
+
+static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
+ unsigned int fc, unsigned int pause_time)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 channel = STMMAC_CHAN0; /* FIXME */
+ unsigned int flow = 0;
+
+ pr_debug("GMAC Flow-Control:\n");
+ if (fc & FLOW_RX) {
+ pr_debug("\tReceive Flow-Control ON\n");
+ flow |= GMAC_RX_FLOW_CTRL_RFE;
+ writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);
+ }
+ if (fc & FLOW_TX) {
+ pr_debug("\tTransmit Flow-Control ON\n");
+ flow |= GMAC_TX_FLOW_CTRL_TFE;
+ writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(channel));
+
+ if (duplex) {
+ pr_debug("\tduplex mode: PAUSE %d\n", pause_time);
+ flow |= (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);
+ writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(channel));
+ }
+ }
+}
+
+static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
+ bool loopback)
+{
+ dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
+}
+
+static void dwmac4_rane(void __iomem *ioaddr, bool restart)
+{
+ dwmac_rane(ioaddr, GMAC_PCS_BASE, restart);
+}
+
+static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
+{
+ dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
+}
+
+/* RGMII or SMII interface */
+static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x)
+{
+ u32 status;
+
+ status = readl(ioaddr + GMAC_PHYIF_CONTROL_STATUS);
+ x->irq_rgmii_n++;
+
+ /* Check the link status */
+ if (status & GMAC_PHYIF_CTRLSTATUS_LNKSTS) {
+ int speed_value;
+
+ x->pcs_link = 1;
+
+ speed_value = ((status & GMAC_PHYIF_CTRLSTATUS_SPEED) >>
+ GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT);
+ if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_125)
+ x->pcs_speed = SPEED_1000;
+ else if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_25)
+ x->pcs_speed = SPEED_100;
+ else
+ x->pcs_speed = SPEED_10;
+
+ x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK);
+
+ pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed,
+ x->pcs_duplex ? "Full" : "Half");
+ } else {
+ x->pcs_link = 0;
+ pr_info("Link is Down\n");
+ }
+}
+
+static int dwmac4_irq_status(struct mac_device_info *hw,
+ struct stmmac_extra_stats *x)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 mtl_int_qx_status;
+ u32 intr_status;
+ int ret = 0;
+
+ intr_status = readl(ioaddr + GMAC_INT_STATUS);
+
+ /* Unused events (e.g. MMC interrupts) are not handled. */
+ if ((intr_status & mmc_tx_irq))
+ x->mmc_tx_irq_n++;
+ if (unlikely(intr_status & mmc_rx_irq))
+ x->mmc_rx_irq_n++;
+ if (unlikely(intr_status & mmc_rx_csum_offload_irq))
+ x->mmc_rx_csum_offload_irq_n++;
+ /* Clear the PMT bits 5 and 6 by reading the PMT status reg */
+ if (unlikely(intr_status & pmt_irq)) {
+ readl(ioaddr + GMAC_PMT);
+ x->irq_receive_pmt_irq_n++;
+ }
+
+ mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);
+ /* Check MTL Interrupt: Currently only one queue is used: Q0. */
+ if (mtl_int_qx_status & MTL_INT_Q0) {
+ /* read Queue 0 Interrupt status */
+ u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(STMMAC_CHAN0));
+
+ if (status & MTL_RX_OVERFLOW_INT) {
+ /* clear Interrupt */
+ writel(status | MTL_RX_OVERFLOW_INT,
+ ioaddr + MTL_CHAN_INT_CTRL(STMMAC_CHAN0));
+ ret = CORE_IRQ_MTL_RX_OVERFLOW;
+ }
+ }
+
+ dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
+ if (intr_status & PCS_RGSMIIIS_IRQ)
+ dwmac4_phystatus(ioaddr, x);
+
+ return ret;
+}
+
+static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x)
+{
+ u32 value;
+
+ /* Currently only channel 0 is supported */
+ value = readl(ioaddr + MTL_CHAN_TX_DEBUG(STMMAC_CHAN0));
+
+ if (value & MTL_DEBUG_TXSTSFSTS)
+ x->mtl_tx_status_fifo_full++;
+ if (value & MTL_DEBUG_TXFSTS)
+ x->mtl_tx_fifo_not_empty++;
+ if (value & MTL_DEBUG_TWCSTS)
+ x->mmtl_fifo_ctrl++;
+ if (value & MTL_DEBUG_TRCSTS_MASK) {
+ u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
+ >> MTL_DEBUG_TRCSTS_SHIFT;
+ if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
+ x->mtl_tx_fifo_read_ctrl_write++;
+ else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
+ x->mtl_tx_fifo_read_ctrl_wait++;
+ else if (trcsts == MTL_DEBUG_TRCSTS_READ)
+ x->mtl_tx_fifo_read_ctrl_read++;
+ else
+ x->mtl_tx_fifo_read_ctrl_idle++;
+ }
+ if (value & MTL_DEBUG_TXPAUSED)
+ x->mac_tx_in_pause++;
+
+ value = readl(ioaddr + MTL_CHAN_RX_DEBUG(STMMAC_CHAN0));
+
+ if (value & MTL_DEBUG_RXFSTS_MASK) {
+ u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
+ >> MTL_DEBUG_RXFSTS_SHIFT;
+
+ if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
+ x->mtl_rx_fifo_fill_level_full++;
+ else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
+ x->mtl_rx_fifo_fill_above_thresh++;
+ else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
+ x->mtl_rx_fifo_fill_below_thresh++;
+ else
+ x->mtl_rx_fifo_fill_level_empty++;
+ }
+ if (value & MTL_DEBUG_RRCSTS_MASK) {
+ u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
+ MTL_DEBUG_RRCSTS_SHIFT;
+
+ if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
+ x->mtl_rx_fifo_read_ctrl_flush++;
+ else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
+ x->mtl_rx_fifo_read_ctrl_status++;
+ else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
+ x->mtl_rx_fifo_read_ctrl_read_data++;
+ else
+ x->mtl_rx_fifo_read_ctrl_idle++;
+ }
+ if (value & MTL_DEBUG_RWCSTS)
+ x->mtl_rx_fifo_ctrl_active++;
+
+ /* GMAC debug */
+ value = readl(ioaddr + GMAC_DEBUG);
+
+ if (value & GMAC_DEBUG_TFCSTS_MASK) {
+ u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK)
+ >> GMAC_DEBUG_TFCSTS_SHIFT;
+
+ if (tfcsts == GMAC_DEBUG_TFCSTS_XFER)
+ x->mac_tx_frame_ctrl_xfer++;
+ else if (tfcsts == GMAC_DEBUG_TFCSTS_GEN_PAUSE)
+ x->mac_tx_frame_ctrl_pause++;
+ else if (tfcsts == GMAC_DEBUG_TFCSTS_WAIT)
+ x->mac_tx_frame_ctrl_wait++;
+ else
+ x->mac_tx_frame_ctrl_idle++;
+ }
+ if (value & GMAC_DEBUG_TPESTS)
+ x->mac_gmii_tx_proto_engine++;
+ if (value & GMAC_DEBUG_RFCFCSTS_MASK)
+ x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK)
+ >> GMAC_DEBUG_RFCFCSTS_SHIFT;
+ if (value & GMAC_DEBUG_RPESTS)
+ x->mac_gmii_rx_proto_engine++;
+}
+
+static const struct stmmac_ops dwmac4_ops = {
+ .core_init = dwmac4_core_init,
+ .rx_ipc = dwmac4_rx_ipc_enable,
+ .dump_regs = dwmac4_dump_regs,
+ .host_irq_status = dwmac4_irq_status,
+ .flow_ctrl = dwmac4_flow_ctrl,
+ .pmt = dwmac4_pmt,
+ .set_umac_addr = dwmac4_set_umac_addr,
+ .get_umac_addr = dwmac4_get_umac_addr,
+ .pcs_ctrl_ane = dwmac4_ctrl_ane,
+ .pcs_rane = dwmac4_rane,
+ .pcs_get_adv_lp = dwmac4_get_adv_lp,
+ .debug = dwmac4_debug,
+ .set_filter = dwmac4_set_filter,
+};
+
+struct mac_device_info *dwmac4_setup(void __iomem *ioaddr, int mcbins,
+ int perfect_uc_entries, int *synopsys_id)
+{
+ struct mac_device_info *mac;
+ u32 hwid = readl(ioaddr + GMAC_VERSION);
+
+ mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
+ if (!mac)
+ return NULL;
+
+ mac->pcsr = ioaddr;
+ mac->multicast_filter_bins = mcbins;
+ mac->unicast_filter_entries = perfect_uc_entries;
+ mac->mcast_bits_log2 = 0;
+
+ if (mac->multicast_filter_bins)
+ mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+
+ mac->mac = &dwmac4_ops;
+
+ mac->link.port = GMAC_CONFIG_PS;
+ mac->link.duplex = GMAC_CONFIG_DM;
+ mac->link.speed = GMAC_CONFIG_FES;
+ mac->mii.addr = GMAC_MDIO_ADDR;
+ mac->mii.data = GMAC_MDIO_DATA;
+
+ /* Get and dump the chip ID */
+ *synopsys_id = stmmac_get_synopsys_id(hwid);
+
+ if (*synopsys_id > DWMAC_CORE_4_00)
+ mac->dma = &dwmac410_dma_ops;
+ else
+ mac->dma = &dwmac4_dma_ops;
+
+ return mac;
+}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
new file mode 100644
index 000000000000..4ec7397e7fb3
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -0,0 +1,389 @@
+/*
+ * This contains the functions to handle the descriptors for DesignWare databook
+ * 4.xx.
+ *
+ * Copyright (C) 2015 STMicroelectronics Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@st.com>
+ */
+
+#include <linux/stmmac.h>
+#include "common.h"
+#include "dwmac4_descs.h"
+
+static int dwmac4_wrback_get_tx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p,
+ void __iomem *ioaddr)
+{
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+ unsigned int tdes3;
+ int ret = tx_done;
+
+ tdes3 = p->des3;
+
+ /* Get tx owner first */
+ if (unlikely(tdes3 & TDES3_OWN))
+ return tx_dma_own;
+
+ /* Verify tx error by looking at the last segment. */
+ if (likely(!(tdes3 & TDES3_LAST_DESCRIPTOR)))
+ return tx_not_ls;
+
+ if (unlikely(tdes3 & TDES3_ERROR_SUMMARY)) {
+ if (unlikely(tdes3 & TDES3_JABBER_TIMEOUT))
+ x->tx_jabber++;
+ if (unlikely(tdes3 & TDES3_PACKET_FLUSHED))
+ x->tx_frame_flushed++;
+ if (unlikely(tdes3 & TDES3_LOSS_CARRIER)) {
+ x->tx_losscarrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely(tdes3 & TDES3_NO_CARRIER)) {
+ x->tx_carrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely((tdes3 & TDES3_LATE_COLLISION) ||
+ (tdes3 & TDES3_EXCESSIVE_COLLISION)))
+ stats->collisions +=
+ (tdes3 & TDES3_COLLISION_COUNT_MASK)
+ >> TDES3_COLLISION_COUNT_SHIFT;
+
+ if (unlikely(tdes3 & TDES3_EXCESSIVE_DEFERRAL))
+ x->tx_deferred++;
+
+ if (unlikely(tdes3 & TDES3_UNDERFLOW_ERROR))
+ x->tx_underflow++;
+
+ if (unlikely(tdes3 & TDES3_IP_HDR_ERROR))
+ x->tx_ip_header_error++;
+
+ if (unlikely(tdes3 & TDES3_PAYLOAD_ERROR))
+ x->tx_payload_error++;
+
+ ret = tx_err;
+ }
+
+ if (unlikely(tdes3 & TDES3_DEFERRED))
+ x->tx_deferred++;
+
+ return ret;
+}
+
+static int dwmac4_wrback_get_rx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p)
+{
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+ unsigned int rdes1 = p->des1;
+ unsigned int rdes2 = p->des2;
+ unsigned int rdes3 = p->des3;
+ int message_type;
+ int ret = good_frame;
+
+ if (unlikely(rdes3 & RDES3_OWN))
+ return dma_own;
+
+ /* Verify rx error by looking at the last segment. */
+ if (likely(!(rdes3 & RDES3_LAST_DESCRIPTOR)))
+ return discard_frame;
+
+ if (unlikely(rdes3 & RDES3_ERROR_SUMMARY)) {
+ if (unlikely(rdes3 & RDES3_GIANT_PACKET))
+ stats->rx_length_errors++;
+ if (unlikely(rdes3 & RDES3_OVERFLOW_ERROR))
+ x->rx_gmac_overflow++;
+
+ if (unlikely(rdes3 & RDES3_RECEIVE_WATCHDOG))
+ x->rx_watchdog++;
+
+ if (unlikely(rdes3 & RDES3_RECEIVE_ERROR))
+ x->rx_mii++;
+
+ if (unlikely(rdes3 & RDES3_CRC_ERROR)) {
+ x->rx_crc++;
+ stats->rx_crc_errors++;
+ }
+
+ if (unlikely(rdes3 & RDES3_DRIBBLE_ERROR))
+ x->dribbling_bit++;
+
+ ret = discard_frame;
+ }
+
+ message_type = (rdes1 & ERDES4_MSG_TYPE_MASK) >> 8;
+
+ if (rdes1 & RDES1_IP_HDR_ERROR)
+ x->ip_hdr_err++;
+ if (rdes1 & RDES1_IP_CSUM_BYPASSED)
+ x->ip_csum_bypassed++;
+ if (rdes1 & RDES1_IPV4_HEADER)
+ x->ipv4_pkt_rcvd++;
+ if (rdes1 & RDES1_IPV6_HEADER)
+ x->ipv6_pkt_rcvd++;
+ if (message_type == RDES_EXT_SYNC)
+ x->rx_msg_type_sync++;
+ else if (message_type == RDES_EXT_FOLLOW_UP)
+ x->rx_msg_type_follow_up++;
+ else if (message_type == RDES_EXT_DELAY_REQ)
+ x->rx_msg_type_delay_req++;
+ else if (message_type == RDES_EXT_DELAY_RESP)
+ x->rx_msg_type_delay_resp++;
+ else if (message_type == RDES_EXT_PDELAY_REQ)
+ x->rx_msg_type_pdelay_req++;
+ else if (message_type == RDES_EXT_PDELAY_RESP)
+ x->rx_msg_type_pdelay_resp++;
+ else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP)
+ x->rx_msg_type_pdelay_follow_up++;
+ else
+ x->rx_msg_type_ext_no_ptp++;
+
+ if (rdes1 & RDES1_PTP_PACKET_TYPE)
+ x->ptp_frame_type++;
+ if (rdes1 & RDES1_PTP_VER)
+ x->ptp_ver++;
+ if (rdes1 & RDES1_TIMESTAMP_DROPPED)
+ x->timestamp_dropped++;
+
+ if (unlikely(rdes2 & RDES2_SA_FILTER_FAIL)) {
+ x->sa_rx_filter_fail++;
+ ret = discard_frame;
+ }
+ if (unlikely(rdes2 & RDES2_DA_FILTER_FAIL)) {
+ x->da_rx_filter_fail++;
+ ret = discard_frame;
+ }
+
+ if (rdes2 & RDES2_L3_FILTER_MATCH)
+ x->l3_filter_match++;
+ if (rdes2 & RDES2_L4_FILTER_MATCH)
+ x->l4_filter_match++;
+ if ((rdes2 & RDES2_L3_L4_FILT_NB_MATCH_MASK)
+ >> RDES2_L3_L4_FILT_NB_MATCH_SHIFT)
+ x->l3_l4_filter_no_match++;
+
+ return ret;
+}
+
+static int dwmac4_rd_get_tx_len(struct dma_desc *p)
+{
+ return (p->des2 & TDES2_BUFFER1_SIZE_MASK);
+}
+
+static int dwmac4_get_tx_owner(struct dma_desc *p)
+{
+ return (p->des3 & TDES3_OWN) >> TDES3_OWN_SHIFT;
+}
+
+static void dwmac4_set_tx_owner(struct dma_desc *p)
+{
+ p->des3 |= TDES3_OWN;
+}
+
+static void dwmac4_set_rx_owner(struct dma_desc *p)
+{
+ p->des3 |= RDES3_OWN;
+}
+
+static int dwmac4_get_tx_ls(struct dma_desc *p)
+{
+ return (p->des3 & TDES3_LAST_DESCRIPTOR) >> TDES3_LAST_DESCRIPTOR_SHIFT;
+}
+
+static int dwmac4_wrback_get_rx_frame_len(struct dma_desc *p, int rx_coe)
+{
+ return (p->des3 & RDES3_PACKET_SIZE_MASK);
+}
+
+static void dwmac4_rd_enable_tx_timestamp(struct dma_desc *p)
+{
+ p->des2 |= TDES2_TIMESTAMP_ENABLE;
+}
+
+static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p)
+{
+ return (p->des3 & TDES3_TIMESTAMP_STATUS)
+ >> TDES3_TIMESTAMP_STATUS_SHIFT;
+}
+
+/* NOTE: for RX, the CTX bit has to be checked before using the timestamp.
+ * TODO: have a specific function for TX and another one for RX.
+ */
+static u64 dwmac4_wrback_get_timestamp(void *desc, u32 ats)
+{
+ struct dma_desc *p = (struct dma_desc *)desc;
+ u64 ns;
+
+ ns = p->des0;
+ /* convert high/sec time stamp value to nanosecond */
+ ns += p->des1 * 1000000000ULL;
+
+ return ns;
+}
+
+static int dwmac4_context_get_rx_timestamp_status(void *desc, u32 ats)
+{
+ struct dma_desc *p = (struct dma_desc *)desc;
+
+ return (p->des1 & RDES1_TIMESTAMP_AVAILABLE)
+ >> RDES1_TIMESTAMP_AVAILABLE_SHIFT;
+}
+
+static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
+ int mode, int end)
+{
+ p->des3 = RDES3_OWN | RDES3_BUFFER1_VALID_ADDR;
+
+ if (!disable_rx_ic)
+ p->des3 |= RDES3_INT_ON_COMPLETION_EN;
+}
+
+static void dwmac4_rd_init_tx_desc(struct dma_desc *p, int mode, int end)
+{
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = 0;
+ p->des3 = 0;
+}
+
+static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+ bool csum_flag, int mode, bool tx_own,
+ bool ls)
+{
+ unsigned int tdes3 = p->des3;
+
+ p->des2 |= (len & TDES2_BUFFER1_SIZE_MASK);
+
+ if (is_fs)
+ tdes3 |= TDES3_FIRST_DESCRIPTOR;
+ else
+ tdes3 &= ~TDES3_FIRST_DESCRIPTOR;
+
+ if (likely(csum_flag))
+ tdes3 |= (TX_CIC_FULL << TDES3_CHECKSUM_INSERTION_SHIFT);
+ else
+ tdes3 &= ~(TX_CIC_FULL << TDES3_CHECKSUM_INSERTION_SHIFT);
+
+ if (ls)
+ tdes3 |= TDES3_LAST_DESCRIPTOR;
+ else
+ tdes3 &= ~TDES3_LAST_DESCRIPTOR;
+
+ /* Finally set the OWN bit. Later the DMA will start! */
+ if (tx_own)
+ tdes3 |= TDES3_OWN;
+
+ if (is_fs && tx_own)
+ /* When the own bit has to be set for the first frame, all
+ * descriptors for the same frame have to be set up beforehand,
+ * to avoid a race condition.
+ */
+ wmb();
+
+ p->des3 = tdes3;
+}
+
+static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
+ int len1, int len2, bool tx_own,
+ bool ls, unsigned int tcphdrlen,
+ unsigned int tcppayloadlen)
+{
+ unsigned int tdes3 = p->des3;
+
+ if (len1)
+ p->des2 |= (len1 & TDES2_BUFFER1_SIZE_MASK);
+
+ if (len2)
+ p->des2 |= (len2 << TDES2_BUFFER2_SIZE_MASK_SHIFT)
+ & TDES2_BUFFER2_SIZE_MASK;
+
+ if (is_fs) {
+ tdes3 |= TDES3_FIRST_DESCRIPTOR |
+ TDES3_TCP_SEGMENTATION_ENABLE |
+ ((tcphdrlen << TDES3_HDR_LEN_SHIFT) &
+ TDES3_SLOT_NUMBER_MASK) |
+ ((tcppayloadlen & TDES3_TCP_PKT_PAYLOAD_MASK));
+ } else {
+ tdes3 &= ~TDES3_FIRST_DESCRIPTOR;
+ }
+
+ if (ls)
+ tdes3 |= TDES3_LAST_DESCRIPTOR;
+ else
+ tdes3 &= ~TDES3_LAST_DESCRIPTOR;
+
+ /* Finally set the OWN bit. Later the DMA will start! */
+ if (tx_own)
+ tdes3 |= TDES3_OWN;
+
+ if (is_fs && tx_own)
+ /* When the own bit has to be set for the first frame, all
+ * descriptors for the same frame have to be set up beforehand,
+ * to avoid a race condition.
+ */
+ wmb();
+
+ p->des3 = tdes3;
+}
+
+static void dwmac4_release_tx_desc(struct dma_desc *p, int mode)
+{
+ p->des2 = 0;
+ p->des3 = 0;
+}
+
+static void dwmac4_rd_set_tx_ic(struct dma_desc *p)
+{
+ p->des2 |= TDES2_INTERRUPT_ON_COMPLETION;
+}
+
+static void dwmac4_display_ring(void *head, unsigned int size, bool rx)
+{
+ struct dma_desc *p = (struct dma_desc *)head;
+ int i;
+
+ pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");
+
+ for (i = 0; i < size; i++) {
+ if (p->des0)
+ pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+ i, (unsigned int)virt_to_phys(p),
+ p->des0, p->des1, p->des2, p->des3);
+ p++;
+ }
+}
+
+static void dwmac4_set_mss_ctxt(struct dma_desc *p, unsigned int mss)
+{
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = mss;
+ p->des3 = TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV;
+}
+
+const struct stmmac_desc_ops dwmac4_desc_ops = {
+ .tx_status = dwmac4_wrback_get_tx_status,
+ .rx_status = dwmac4_wrback_get_rx_status,
+ .get_tx_len = dwmac4_rd_get_tx_len,
+ .get_tx_owner = dwmac4_get_tx_owner,
+ .set_tx_owner = dwmac4_set_tx_owner,
+ .set_rx_owner = dwmac4_set_rx_owner,
+ .get_tx_ls = dwmac4_get_tx_ls,
+ .get_rx_frame_len = dwmac4_wrback_get_rx_frame_len,
+ .enable_tx_timestamp = dwmac4_rd_enable_tx_timestamp,
+ .get_tx_timestamp_status = dwmac4_wrback_get_tx_timestamp_status,
+ .get_timestamp = dwmac4_wrback_get_timestamp,
+ .get_rx_timestamp_status = dwmac4_context_get_rx_timestamp_status,
+ .set_tx_ic = dwmac4_rd_set_tx_ic,
+ .prepare_tx_desc = dwmac4_rd_prepare_tx_desc,
+ .prepare_tso_tx_desc = dwmac4_rd_prepare_tso_tx_desc,
+ .release_tx_desc = dwmac4_release_tx_desc,
+ .init_rx_desc = dwmac4_rd_init_rx_desc,
+ .init_tx_desc = dwmac4_rd_init_tx_desc,
+ .display_ring = dwmac4_display_ring,
+ .set_mss = dwmac4_set_mss_ctxt,
+};
+
+const struct stmmac_mode_ops dwmac4_ring_mode_ops = { };
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
new file mode 100644
index 000000000000..0902a2edeaa9
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
@@ -0,0 +1,129 @@
+/*
+ * Header File to describe the DMA descriptors and related definitions specific
+ * for DesignWare databook 4.xx.
+ *
+ * Copyright (C) 2015 STMicroelectronics Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@st.com>
+ */
+
+#ifndef __DWMAC4_DESCS_H__
+#define __DWMAC4_DESCS_H__
+
+#include <linux/bitops.h>
+
+/* Normal transmit descriptor defines (without split feature) */
+
+/* TDES2 (read format) */
+#define TDES2_BUFFER1_SIZE_MASK GENMASK(13, 0)
+#define TDES2_VLAN_TAG_MASK GENMASK(15, 14)
+#define TDES2_BUFFER2_SIZE_MASK GENMASK(29, 16)
+#define TDES2_BUFFER2_SIZE_MASK_SHIFT 16
+#define TDES2_TIMESTAMP_ENABLE BIT(30)
+#define TDES2_INTERRUPT_ON_COMPLETION BIT(31)
+
+/* TDES3 (read format) */
+#define TDES3_PACKET_SIZE_MASK GENMASK(14, 0)
+#define TDES3_CHECKSUM_INSERTION_MASK GENMASK(17, 16)
+#define TDES3_CHECKSUM_INSERTION_SHIFT 16
+#define TDES3_TCP_PKT_PAYLOAD_MASK GENMASK(17, 0)
+#define TDES3_TCP_SEGMENTATION_ENABLE BIT(18)
+#define TDES3_HDR_LEN_SHIFT 19
+#define TDES3_SLOT_NUMBER_MASK GENMASK(22, 19)
+#define TDES3_SA_INSERT_CTRL_MASK GENMASK(25, 23)
+#define TDES3_CRC_PAD_CTRL_MASK GENMASK(27, 26)
+
+/* TDES3 (write back format) */
+#define TDES3_IP_HDR_ERROR BIT(0)
+#define TDES3_DEFERRED BIT(1)
+#define TDES3_UNDERFLOW_ERROR BIT(2)
+#define TDES3_EXCESSIVE_DEFERRAL BIT(3)
+#define TDES3_COLLISION_COUNT_MASK GENMASK(7, 4)
+#define TDES3_COLLISION_COUNT_SHIFT 4
+#define TDES3_EXCESSIVE_COLLISION BIT(8)
+#define TDES3_LATE_COLLISION BIT(9)
+#define TDES3_NO_CARRIER BIT(10)
+#define TDES3_LOSS_CARRIER BIT(11)
+#define TDES3_PAYLOAD_ERROR BIT(12)
+#define TDES3_PACKET_FLUSHED BIT(13)
+#define TDES3_JABBER_TIMEOUT BIT(14)
+#define TDES3_ERROR_SUMMARY BIT(15)
+#define TDES3_TIMESTAMP_STATUS BIT(17)
+#define TDES3_TIMESTAMP_STATUS_SHIFT 17
+
+/* TDES3 context */
+#define TDES3_CTXT_TCMSSV BIT(26)
+
+/* TDES3 Common */
+#define TDES3_LAST_DESCRIPTOR BIT(28)
+#define TDES3_LAST_DESCRIPTOR_SHIFT 28
+#define TDES3_FIRST_DESCRIPTOR BIT(29)
+#define TDES3_CONTEXT_TYPE BIT(30)
+
+/* TDS3 use for both format (read and write back) */
+#define TDES3_OWN BIT(31)
+#define TDES3_OWN_SHIFT 31
+
+/* Normal receive descriptor defines (without split feature) */
+
+/* RDES0 (write back format) */
+#define RDES0_VLAN_TAG_MASK GENMASK(15, 0)
+
+/* RDES1 (write back format) */
+#define RDES1_IP_PAYLOAD_TYPE_MASK GENMASK(2, 0)
+#define RDES1_IP_HDR_ERROR BIT(3)
+#define RDES1_IPV4_HEADER BIT(4)
+#define RDES1_IPV6_HEADER BIT(5)
+#define RDES1_IP_CSUM_BYPASSED BIT(6)
+#define RDES1_IP_CSUM_ERROR BIT(7)
+#define RDES1_PTP_MSG_TYPE_MASK GENMASK(11, 8)
+#define RDES1_PTP_PACKET_TYPE BIT(12)
+#define RDES1_PTP_VER BIT(13)
+#define RDES1_TIMESTAMP_AVAILABLE BIT(14)
+#define RDES1_TIMESTAMP_AVAILABLE_SHIFT 14
+#define RDES1_TIMESTAMP_DROPPED BIT(15)
+#define RDES1_IP_TYPE1_CSUM_MASK GENMASK(31, 16)
+
+/* RDES2 (write back format) */
+#define RDES2_L3_L4_HEADER_SIZE_MASK GENMASK(9, 0)
+#define RDES2_VLAN_FILTER_STATUS BIT(15)
+#define RDES2_SA_FILTER_FAIL BIT(16)
+#define RDES2_DA_FILTER_FAIL BIT(17)
+#define RDES2_HASH_FILTER_STATUS BIT(18)
+#define RDES2_MAC_ADDR_MATCH_MASK GENMASK(26, 19)
+#define RDES2_HASH_VALUE_MATCH_MASK GENMASK(26, 19)
+#define RDES2_L3_FILTER_MATCH BIT(27)
+#define RDES2_L4_FILTER_MATCH BIT(28)
+#define RDES2_L3_L4_FILT_NB_MATCH_MASK GENMASK(27, 26)
+#define RDES2_L3_L4_FILT_NB_MATCH_SHIFT 26
+
+/* RDES3 (write back format) */
+#define RDES3_PACKET_SIZE_MASK GENMASK(14, 0)
+#define RDES3_ERROR_SUMMARY BIT(15)
+#define RDES3_PACKET_LEN_TYPE_MASK GENMASK(18, 16)
+#define RDES3_DRIBBLE_ERROR BIT(19)
+#define RDES3_RECEIVE_ERROR BIT(20)
+#define RDES3_OVERFLOW_ERROR BIT(21)
+#define RDES3_RECEIVE_WATCHDOG BIT(22)
+#define RDES3_GIANT_PACKET BIT(23)
+#define RDES3_CRC_ERROR BIT(24)
+#define RDES3_RDES0_VALID BIT(25)
+#define RDES3_RDES1_VALID BIT(26)
+#define RDES3_RDES2_VALID BIT(27)
+#define RDES3_LAST_DESCRIPTOR BIT(28)
+#define RDES3_FIRST_DESCRIPTOR BIT(29)
+#define RDES3_CONTEXT_DESCRIPTOR BIT(30)
+
+/* RDES3 (read format) */
+#define RDES3_BUFFER1_VALID_ADDR BIT(24)
+#define RDES3_BUFFER2_VALID_ADDR BIT(25)
+#define RDES3_INT_ON_COMPLETION_EN BIT(30)
+
+/* TDS3 use for both format (read and write back) */
+#define RDES3_OWN BIT(31)
+
+#endif /* __DWMAC4_DESCS_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
new file mode 100644
index 000000000000..116151cd6a95
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -0,0 +1,354 @@
+/*
+ * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
+ * DWC Ether MAC version 4.xx has been used for developing this code.
+ *
+ * This contains the functions to handle the dma.
+ *
+ * Copyright (C) 2015 STMicroelectronics Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@st.com>
+ */
+
+#include <linux/io.h>
+#include "dwmac4.h"
+#include "dwmac4_dma.h"
+
+static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
+{
+ u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
+ int i;
+
+ pr_info("dwmac4: Master AXI performs %s burst length\n",
+ (value & DMA_SYS_BUS_FB) ? "fixed" : "any");
+
+ if (axi->axi_lpi_en)
+ value |= DMA_AXI_EN_LPI;
+ if (axi->axi_xit_frm)
+ value |= DMA_AXI_LPI_XIT_FRM;
+
+ value |= (axi->axi_wr_osr_lmt & DMA_AXI_OSR_MAX) <<
+ DMA_AXI_WR_OSR_LMT_SHIFT;
+
+ value |= (axi->axi_rd_osr_lmt & DMA_AXI_OSR_MAX) <<
+ DMA_AXI_RD_OSR_LMT_SHIFT;
+
+ /* Depending on the UNDEF bit, the AXI master will perform any burst
+ * length according to the programmed BLEN values (by default all
+ * BLEN bits are set).
+ */
+ for (i = 0; i < AXI_BLEN; i++) {
+ switch (axi->axi_blen[i]) {
+ case 256:
+ value |= DMA_AXI_BLEN256;
+ break;
+ case 128:
+ value |= DMA_AXI_BLEN128;
+ break;
+ case 64:
+ value |= DMA_AXI_BLEN64;
+ break;
+ case 32:
+ value |= DMA_AXI_BLEN32;
+ break;
+ case 16:
+ value |= DMA_AXI_BLEN16;
+ break;
+ case 8:
+ value |= DMA_AXI_BLEN8;
+ break;
+ case 4:
+ value |= DMA_AXI_BLEN4;
+ break;
+ }
+ }
+
+ writel(value, ioaddr + DMA_SYS_BUS_MODE);
+}
+
+static void dwmac4_dma_init_channel(void __iomem *ioaddr, int pbl,
+ u32 dma_tx_phy, u32 dma_rx_phy,
+ u32 channel)
+{
+ u32 value;
+
+ /* Set the PBL for each channel. Currently we apply the same
+ * configuration to each channel.
+ */
+ value = readl(ioaddr + DMA_CHAN_CONTROL(channel));
+ value = value | DMA_BUS_MODE_PBL;
+ writel(value, ioaddr + DMA_CHAN_CONTROL(channel));
+
+ value = readl(ioaddr + DMA_CHAN_TX_CONTROL(channel));
+ value = value | (pbl << DMA_BUS_MODE_PBL_SHIFT);
+ writel(value, ioaddr + DMA_CHAN_TX_CONTROL(channel));
+
+ value = readl(ioaddr + DMA_CHAN_RX_CONTROL(channel));
+ value = value | (pbl << DMA_BUS_MODE_RPBL_SHIFT);
+ writel(value, ioaddr + DMA_CHAN_RX_CONTROL(channel));
+
+ /* Mask interrupts by writing to CSR7 */
+ writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr + DMA_CHAN_INTR_ENA(channel));
+
+ writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(channel));
+ writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(channel));
+}
+
+static void dwmac4_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
+ int aal, u32 dma_tx, u32 dma_rx, int atds)
+{
+ u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
+ int i;
+
+ /* Set the Fixed burst mode */
+ if (fb)
+ value |= DMA_SYS_BUS_FB;
+
+ /* Mixed Burst has no effect when fb is set */
+ if (mb)
+ value |= DMA_SYS_BUS_MB;
+
+ if (aal)
+ value |= DMA_SYS_BUS_AAL;
+
+ writel(value, ioaddr + DMA_SYS_BUS_MODE);
+
+ for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
+ dwmac4_dma_init_channel(ioaddr, pbl, dma_tx, dma_rx, i);
+}
+
+static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel)
+{
+ pr_debug(" Channel %d\n", channel);
+ pr_debug("\tDMA_CHAN_CONTROL, offset: 0x%x, val: 0x%x\n", 0,
+ readl(ioaddr + DMA_CHAN_CONTROL(channel)));
+ pr_debug("\tDMA_CHAN_TX_CONTROL, offset: 0x%x, val: 0x%x\n", 0x4,
+ readl(ioaddr + DMA_CHAN_TX_CONTROL(channel)));
+ pr_debug("\tDMA_CHAN_RX_CONTROL, offset: 0x%x, val: 0x%x\n", 0x8,
+ readl(ioaddr + DMA_CHAN_RX_CONTROL(channel)));
+ pr_debug("\tDMA_CHAN_TX_BASE_ADDR, offset: 0x%x, val: 0x%x\n", 0x14,
+ readl(ioaddr + DMA_CHAN_TX_BASE_ADDR(channel)));
+ pr_debug("\tDMA_CHAN_RX_BASE_ADDR, offset: 0x%x, val: 0x%x\n", 0x1c,
+ readl(ioaddr + DMA_CHAN_RX_BASE_ADDR(channel)));
+ pr_debug("\tDMA_CHAN_TX_END_ADDR, offset: 0x%x, val: 0x%x\n", 0x20,
+ readl(ioaddr + DMA_CHAN_TX_END_ADDR(channel)));
+ pr_debug("\tDMA_CHAN_RX_END_ADDR, offset: 0x%x, val: 0x%x\n", 0x28,
+ readl(ioaddr + DMA_CHAN_RX_END_ADDR(channel)));
+ pr_debug("\tDMA_CHAN_TX_RING_LEN, offset: 0x%x, val: 0x%x\n", 0x2c,
+ readl(ioaddr + DMA_CHAN_TX_RING_LEN(channel)));
+ pr_debug("\tDMA_CHAN_RX_RING_LEN, offset: 0x%x, val: 0x%x\n", 0x30,
+ readl(ioaddr + DMA_CHAN_RX_RING_LEN(channel)));
+ pr_debug("\tDMA_CHAN_INTR_ENA, offset: 0x%x, val: 0x%x\n", 0x34,
+ readl(ioaddr + DMA_CHAN_INTR_ENA(channel)));
+ pr_debug("\tDMA_CHAN_RX_WATCHDOG, offset: 0x%x, val: 0x%x\n", 0x38,
+ readl(ioaddr + DMA_CHAN_RX_WATCHDOG(channel)));
+ pr_debug("\tDMA_CHAN_SLOT_CTRL_STATUS, offset: 0x%x, val: 0x%x\n", 0x3c,
+ readl(ioaddr + DMA_CHAN_SLOT_CTRL_STATUS(channel)));
+ pr_debug("\tDMA_CHAN_CUR_TX_DESC, offset: 0x%x, val: 0x%x\n", 0x44,
+ readl(ioaddr + DMA_CHAN_CUR_TX_DESC(channel)));
+ pr_debug("\tDMA_CHAN_CUR_RX_DESC, offset: 0x%x, val: 0x%x\n", 0x4c,
+ readl(ioaddr + DMA_CHAN_CUR_RX_DESC(channel)));
+ pr_debug("\tDMA_CHAN_CUR_TX_BUF_ADDR, offset: 0x%x, val: 0x%x\n", 0x54,
+ readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR(channel)));
+ pr_debug("\tDMA_CHAN_CUR_RX_BUF_ADDR, offset: 0x%x, val: 0x%x\n", 0x5c,
+ readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR(channel)));
+ pr_debug("\tDMA_CHAN_STATUS, offset: 0x%x, val: 0x%x\n", 0x60,
+ readl(ioaddr + DMA_CHAN_STATUS(channel)));
+}
+
+static void dwmac4_dump_dma_regs(void __iomem *ioaddr)
+{
+ int i;
+
+ pr_debug(" GMAC4 DMA registers\n");
+
+ for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
+ _dwmac4_dump_dma_regs(ioaddr, i);
+}
+
+static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt)
+{
+ int i;
+
+ for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
+ writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(i));
+}
+
+static void dwmac4_dma_chan_op_mode(void __iomem *ioaddr, int txmode,
+ int rxmode, u32 channel)
+{
+ u32 mtl_tx_op, mtl_rx_op, mtl_rx_int;
+
+ /* The following code is done only for channel 0; other channels
+ * are not yet supported.
+ */
+ mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+
+ if (txmode == SF_DMA_MODE) {
+ pr_debug("GMAC: enable TX store and forward mode\n");
+ /* Transmit COE type 2 cannot be done in cut-through mode. */
+ mtl_tx_op |= MTL_OP_MODE_TSF;
+ } else {
+ pr_debug("GMAC: disabling TX SF (threshold %d)\n", txmode);
+ mtl_tx_op &= ~MTL_OP_MODE_TSF;
+ mtl_tx_op &= ~MTL_OP_MODE_TTC_MASK;
+ /* Set the transmit threshold */
+ if (txmode <= 32)
+ mtl_tx_op |= MTL_OP_MODE_TTC_32;
+ else if (txmode <= 64)
+ mtl_tx_op |= MTL_OP_MODE_TTC_64;
+ else if (txmode <= 96)
+ mtl_tx_op |= MTL_OP_MODE_TTC_96;
+ else if (txmode <= 128)
+ mtl_tx_op |= MTL_OP_MODE_TTC_128;
+ else if (txmode <= 192)
+ mtl_tx_op |= MTL_OP_MODE_TTC_192;
+ else if (txmode <= 256)
+ mtl_tx_op |= MTL_OP_MODE_TTC_256;
+ else if (txmode <= 384)
+ mtl_tx_op |= MTL_OP_MODE_TTC_384;
+ else
+ mtl_tx_op |= MTL_OP_MODE_TTC_512;
+ }
+
+ writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+
+ mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));
+
+ if (rxmode == SF_DMA_MODE) {
+ pr_debug("GMAC: enable RX store and forward mode\n");
+ mtl_rx_op |= MTL_OP_MODE_RSF;
+ } else {
+ pr_debug("GMAC: disable RX SF mode (threshold %d)\n", rxmode);
+ mtl_rx_op &= ~MTL_OP_MODE_RSF;
+ mtl_rx_op &= ~MTL_OP_MODE_RTC_MASK;
+ if (rxmode <= 32)
+ mtl_rx_op |= MTL_OP_MODE_RTC_32;
+ else if (rxmode <= 64)
+ mtl_rx_op |= MTL_OP_MODE_RTC_64;
+ else if (rxmode <= 96)
+ mtl_rx_op |= MTL_OP_MODE_RTC_96;
+ else
+ mtl_rx_op |= MTL_OP_MODE_RTC_128;
+ }
+
+ writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel));
+
+ /* Enable MTL RX overflow */
+ mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel));
+ writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN,
+ ioaddr + MTL_CHAN_INT_CTRL(channel));
+}
+
+static void dwmac4_dma_operation_mode(void __iomem *ioaddr, int txmode,
+ int rxmode, int rxfifosz)
+{
+ /* Only Channel 0 is actually configured and used */
+ dwmac4_dma_chan_op_mode(ioaddr, txmode, rxmode, 0);
+}
+
+static void dwmac4_get_hw_feature(void __iomem *ioaddr,
+ struct dma_features *dma_cap)
+{
+ u32 hw_cap = readl(ioaddr + GMAC_HW_FEATURE0);
+
+ /* MAC HW feature0 */
+ dma_cap->mbps_10_100 = (hw_cap & GMAC_HW_FEAT_MIISEL);
+ dma_cap->mbps_1000 = (hw_cap & GMAC_HW_FEAT_GMIISEL) >> 1;
+ dma_cap->half_duplex = (hw_cap & GMAC_HW_FEAT_HDSEL) >> 2;
+ dma_cap->hash_filter = (hw_cap & GMAC_HW_FEAT_VLHASH) >> 4;
+ dma_cap->multi_addr = (hw_cap & GMAC_HW_FEAT_ADDMAC) >> 18;
+ dma_cap->pcs = (hw_cap & GMAC_HW_FEAT_PCSSEL) >> 3;
+ dma_cap->sma_mdio = (hw_cap & GMAC_HW_FEAT_SMASEL) >> 5;
+ dma_cap->pmt_remote_wake_up = (hw_cap & GMAC_HW_FEAT_RWKSEL) >> 6;
+ dma_cap->pmt_magic_frame = (hw_cap & GMAC_HW_FEAT_MGKSEL) >> 7;
+ /* MMC */
+ dma_cap->rmon = (hw_cap & GMAC_HW_FEAT_MMCSEL) >> 8;
+ /* IEEE 1588-2008 */
+ dma_cap->atime_stamp = (hw_cap & GMAC_HW_FEAT_TSSEL) >> 12;
+ /* 802.3az - Energy-Efficient Ethernet (EEE) */
+ dma_cap->eee = (hw_cap & GMAC_HW_FEAT_EEESEL) >> 13;
+ /* TX and RX csum */
+ dma_cap->tx_coe = (hw_cap & GMAC_HW_FEAT_TXCOSEL) >> 14;
+ dma_cap->rx_coe = (hw_cap & GMAC_HW_FEAT_RXCOESEL) >> 16;
+
+ /* MAC HW feature1 */
+ hw_cap = readl(ioaddr + GMAC_HW_FEATURE1);
+ dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20;
+ dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18;
+ /* MAC HW feature2 */
+ hw_cap = readl(ioaddr + GMAC_HW_FEATURE2);
+ /* TX and RX number of channels */
+ dma_cap->number_rx_channel =
+ ((hw_cap & GMAC_HW_FEAT_RXCHCNT) >> 12) + 1;
+ dma_cap->number_tx_channel =
+ ((hw_cap & GMAC_HW_FEAT_TXCHCNT) >> 18) + 1;
+
+ /* IEEE 1588-2002 */
+ dma_cap->time_stamp = 0;
+}
+
+/* Enable or disable the TSO feature for a given DMA channel */
+static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
+{
+ u32 value;
+
+ if (en) {
+ /* enable TSO */
+ value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
+ writel(value | DMA_CONTROL_TSE,
+ ioaddr + DMA_CHAN_TX_CONTROL(chan));
+ } else {
+ /* disable TSO */
+ value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
+ writel(value & ~DMA_CONTROL_TSE,
+ ioaddr + DMA_CHAN_TX_CONTROL(chan));
+ }
+}
+
+const struct stmmac_dma_ops dwmac4_dma_ops = {
+ .reset = dwmac4_dma_reset,
+ .init = dwmac4_dma_init,
+ .axi = dwmac4_dma_axi,
+ .dump_regs = dwmac4_dump_dma_regs,
+ .dma_mode = dwmac4_dma_operation_mode,
+ .enable_dma_irq = dwmac4_enable_dma_irq,
+ .disable_dma_irq = dwmac4_disable_dma_irq,
+ .start_tx = dwmac4_dma_start_tx,
+ .stop_tx = dwmac4_dma_stop_tx,
+ .start_rx = dwmac4_dma_start_rx,
+ .stop_rx = dwmac4_dma_stop_rx,
+ .dma_interrupt = dwmac4_dma_interrupt,
+ .get_hw_feature = dwmac4_get_hw_feature,
+ .rx_watchdog = dwmac4_rx_watchdog,
+ .set_rx_ring_len = dwmac4_set_rx_ring_len,
+ .set_tx_ring_len = dwmac4_set_tx_ring_len,
+ .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
+ .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
+ .enable_tso = dwmac4_enable_tso,
+};
+
+const struct stmmac_dma_ops dwmac410_dma_ops = {
+ .reset = dwmac4_dma_reset,
+ .init = dwmac4_dma_init,
+ .axi = dwmac4_dma_axi,
+ .dump_regs = dwmac4_dump_dma_regs,
+ .dma_mode = dwmac4_dma_operation_mode,
+ .enable_dma_irq = dwmac410_enable_dma_irq,
+ .disable_dma_irq = dwmac4_disable_dma_irq,
+ .start_tx = dwmac4_dma_start_tx,
+ .stop_tx = dwmac4_dma_stop_tx,
+ .start_rx = dwmac4_dma_start_rx,
+ .stop_rx = dwmac4_dma_stop_rx,
+ .dma_interrupt = dwmac4_dma_interrupt,
+ .get_hw_feature = dwmac4_get_hw_feature,
+ .rx_watchdog = dwmac4_rx_watchdog,
+ .set_rx_ring_len = dwmac4_set_rx_ring_len,
+ .set_tx_ring_len = dwmac4_set_tx_ring_len,
+ .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
+ .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
+ .enable_tso = dwmac4_enable_tso,
+};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
new file mode 100644
index 000000000000..1b06df749e2b
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
@@ -0,0 +1,202 @@
+/*
+ * DWMAC4 DMA Header file.
+ *
+ *
+ * Copyright (C) 2007-2015 STMicroelectronics Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@st.com>
+ */
+
+#ifndef __DWMAC4_DMA_H__
+#define __DWMAC4_DMA_H__
+
+/* Define the max channel number used for tx (also rx).
+ * dwmac4 accepts up to 8 channels for TX (and also 8 channels for RX).
+ */
+#define DMA_CHANNEL_NB_MAX 1
+
+#define DMA_BUS_MODE 0x00001000
+#define DMA_SYS_BUS_MODE 0x00001004
+#define DMA_STATUS 0x00001008
+#define DMA_DEBUG_STATUS_0 0x0000100c
+#define DMA_DEBUG_STATUS_1 0x00001010
+#define DMA_DEBUG_STATUS_2 0x00001014
+#define DMA_AXI_BUS_MODE 0x00001028
+
+/* DMA Bus Mode bitmap */
+#define DMA_BUS_MODE_SFT_RESET BIT(0)
+
+/* DMA SYS Bus Mode bitmap */
+#define DMA_BUS_MODE_SPH BIT(24)
+#define DMA_BUS_MODE_PBL BIT(16)
+#define DMA_BUS_MODE_PBL_SHIFT 16
+#define DMA_BUS_MODE_RPBL_SHIFT 16
+#define DMA_BUS_MODE_MB BIT(14)
+#define DMA_BUS_MODE_FB BIT(0)
+
+/* DMA Interrupt top status */
+#define DMA_STATUS_MAC BIT(17)
+#define DMA_STATUS_MTL BIT(16)
+#define DMA_STATUS_CHAN7 BIT(7)
+#define DMA_STATUS_CHAN6 BIT(6)
+#define DMA_STATUS_CHAN5 BIT(5)
+#define DMA_STATUS_CHAN4 BIT(4)
+#define DMA_STATUS_CHAN3 BIT(3)
+#define DMA_STATUS_CHAN2 BIT(2)
+#define DMA_STATUS_CHAN1 BIT(1)
+#define DMA_STATUS_CHAN0 BIT(0)
+
+/* DMA debug status bitmap */
+#define DMA_DEBUG_STATUS_TS_MASK 0xf
+#define DMA_DEBUG_STATUS_RS_MASK 0xf
+
+/* DMA AXI bitmap */
+#define DMA_AXI_EN_LPI BIT(31)
+#define DMA_AXI_LPI_XIT_FRM BIT(30)
+#define DMA_AXI_WR_OSR_LMT GENMASK(27, 24)
+#define DMA_AXI_WR_OSR_LMT_SHIFT 24
+#define DMA_AXI_RD_OSR_LMT GENMASK(19, 16)
+#define DMA_AXI_RD_OSR_LMT_SHIFT 16
+
+#define DMA_AXI_OSR_MAX 0xf
+#define DMA_AXI_MAX_OSR_LIMIT ((DMA_AXI_OSR_MAX << DMA_AXI_WR_OSR_LMT_SHIFT) | \
+ (DMA_AXI_OSR_MAX << DMA_AXI_RD_OSR_LMT_SHIFT))
+
+#define DMA_SYS_BUS_MB BIT(14)
+#define DMA_AXI_1KBBE BIT(13)
+#define DMA_SYS_BUS_AAL BIT(12)
+#define DMA_AXI_BLEN256 BIT(7)
+#define DMA_AXI_BLEN128 BIT(6)
+#define DMA_AXI_BLEN64 BIT(5)
+#define DMA_AXI_BLEN32 BIT(4)
+#define DMA_AXI_BLEN16 BIT(3)
+#define DMA_AXI_BLEN8 BIT(2)
+#define DMA_AXI_BLEN4 BIT(1)
+#define DMA_SYS_BUS_FB BIT(0)
+
+#define DMA_BURST_LEN_DEFAULT (DMA_AXI_BLEN256 | DMA_AXI_BLEN128 | \
+ DMA_AXI_BLEN64 | DMA_AXI_BLEN32 | \
+ DMA_AXI_BLEN16 | DMA_AXI_BLEN8 | \
+ DMA_AXI_BLEN4)
+
+#define DMA_AXI_BURST_LEN_MASK 0x000000FE
+
+/* The following DMA defines are channel-oriented */
+#define DMA_CHAN_BASE_ADDR 0x00001100
+#define DMA_CHAN_BASE_OFFSET 0x80
+#define DMA_CHANX_BASE_ADDR(x) (DMA_CHAN_BASE_ADDR + \
+ (x * DMA_CHAN_BASE_OFFSET))
+#define DMA_CHAN_REG_NUMBER 17
+
+#define DMA_CHAN_CONTROL(x) DMA_CHANX_BASE_ADDR(x)
+#define DMA_CHAN_TX_CONTROL(x) (DMA_CHANX_BASE_ADDR(x) + 0x4)
+#define DMA_CHAN_RX_CONTROL(x) (DMA_CHANX_BASE_ADDR(x) + 0x8)
+#define DMA_CHAN_TX_BASE_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x14)
+#define DMA_CHAN_RX_BASE_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x1c)
+#define DMA_CHAN_TX_END_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x20)
+#define DMA_CHAN_RX_END_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x28)
+#define DMA_CHAN_TX_RING_LEN(x) (DMA_CHANX_BASE_ADDR(x) + 0x2c)
+#define DMA_CHAN_RX_RING_LEN(x) (DMA_CHANX_BASE_ADDR(x) + 0x30)
+#define DMA_CHAN_INTR_ENA(x) (DMA_CHANX_BASE_ADDR(x) + 0x34)
+#define DMA_CHAN_RX_WATCHDOG(x) (DMA_CHANX_BASE_ADDR(x) + 0x38)
+#define DMA_CHAN_SLOT_CTRL_STATUS(x) (DMA_CHANX_BASE_ADDR(x) + 0x3c)
+#define DMA_CHAN_CUR_TX_DESC(x) (DMA_CHANX_BASE_ADDR(x) + 0x44)
+#define DMA_CHAN_CUR_RX_DESC(x) (DMA_CHANX_BASE_ADDR(x) + 0x4c)
+#define DMA_CHAN_CUR_TX_BUF_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x54)
+#define DMA_CHAN_CUR_RX_BUF_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x5c)
+#define DMA_CHAN_STATUS(x) (DMA_CHANX_BASE_ADDR(x) + 0x60)
+
+/* DMA Control X */
+#define DMA_CONTROL_MSS_MASK GENMASK(13, 0)
+
+/* DMA Tx Channel X Control register defines */
+#define DMA_CONTROL_TSE BIT(12)
+#define DMA_CONTROL_OSP BIT(4)
+#define DMA_CONTROL_ST BIT(0)
+
+/* DMA Rx Channel X Control register defines */
+#define DMA_CONTROL_SR BIT(0)
+
+/* Interrupt status per channel */
+#define DMA_CHAN_STATUS_REB GENMASK(21, 19)
+#define DMA_CHAN_STATUS_REB_SHIFT 19
+#define DMA_CHAN_STATUS_TEB GENMASK(18, 16)
+#define DMA_CHAN_STATUS_TEB_SHIFT 16
+#define DMA_CHAN_STATUS_NIS BIT(15)
+#define DMA_CHAN_STATUS_AIS BIT(14)
+#define DMA_CHAN_STATUS_CDE BIT(13)
+#define DMA_CHAN_STATUS_FBE BIT(12)
+#define DMA_CHAN_STATUS_ERI BIT(11)
+#define DMA_CHAN_STATUS_ETI BIT(10)
+#define DMA_CHAN_STATUS_RWT BIT(9)
+#define DMA_CHAN_STATUS_RPS BIT(8)
+#define DMA_CHAN_STATUS_RBU BIT(7)
+#define DMA_CHAN_STATUS_RI BIT(6)
+#define DMA_CHAN_STATUS_TBU BIT(2)
+#define DMA_CHAN_STATUS_TPS BIT(1)
+#define DMA_CHAN_STATUS_TI BIT(0)
+
+/* Interrupt enable bits per channel */
+#define DMA_CHAN_INTR_ENA_NIE BIT(16)
+#define DMA_CHAN_INTR_ENA_AIE BIT(15)
+#define DMA_CHAN_INTR_ENA_NIE_4_10 BIT(15)
+#define DMA_CHAN_INTR_ENA_AIE_4_10 BIT(14)
+#define DMA_CHAN_INTR_ENA_CDE BIT(13)
+#define DMA_CHAN_INTR_ENA_FBE BIT(12)
+#define DMA_CHAN_INTR_ENA_ERE BIT(11)
+#define DMA_CHAN_INTR_ENA_ETE BIT(10)
+#define DMA_CHAN_INTR_ENA_RWE BIT(9)
+#define DMA_CHAN_INTR_ENA_RSE BIT(8)
+#define DMA_CHAN_INTR_ENA_RBUE BIT(7)
+#define DMA_CHAN_INTR_ENA_RIE BIT(6)
+#define DMA_CHAN_INTR_ENA_TBUE BIT(2)
+#define DMA_CHAN_INTR_ENA_TSE BIT(1)
+#define DMA_CHAN_INTR_ENA_TIE BIT(0)
+
+#define DMA_CHAN_INTR_NORMAL (DMA_CHAN_INTR_ENA_NIE | \
+ DMA_CHAN_INTR_ENA_RIE | \
+ DMA_CHAN_INTR_ENA_TIE)
+
+#define DMA_CHAN_INTR_ABNORMAL (DMA_CHAN_INTR_ENA_AIE | \
+ DMA_CHAN_INTR_ENA_FBE)
+/* DMA default interrupt mask for 4.00 */
+#define DMA_CHAN_INTR_DEFAULT_MASK (DMA_CHAN_INTR_NORMAL | \
+ DMA_CHAN_INTR_ABNORMAL)
+
+#define DMA_CHAN_INTR_NORMAL_4_10 (DMA_CHAN_INTR_ENA_NIE_4_10 | \
+ DMA_CHAN_INTR_ENA_RIE | \
+ DMA_CHAN_INTR_ENA_TIE)
+
+#define DMA_CHAN_INTR_ABNORMAL_4_10 (DMA_CHAN_INTR_ENA_AIE_4_10 | \
+ DMA_CHAN_INTR_ENA_FBE)
+/* DMA default interrupt mask for 4.10a */
+#define DMA_CHAN_INTR_DEFAULT_MASK_4_10 (DMA_CHAN_INTR_NORMAL_4_10 | \
+ DMA_CHAN_INTR_ABNORMAL_4_10)
+
+/* channel 0 specific fields */
+#define DMA_CHAN0_DBG_STAT_TPS GENMASK(15, 12)
+#define DMA_CHAN0_DBG_STAT_TPS_SHIFT 12
+#define DMA_CHAN0_DBG_STAT_RPS GENMASK(11, 8)
+#define DMA_CHAN0_DBG_STAT_RPS_SHIFT 8
+
+int dwmac4_dma_reset(void __iomem *ioaddr);
+void dwmac4_enable_dma_transmission(void __iomem *ioaddr, u32 tail_ptr);
+void dwmac4_enable_dma_irq(void __iomem *ioaddr);
+void dwmac410_enable_dma_irq(void __iomem *ioaddr);
+void dwmac4_disable_dma_irq(void __iomem *ioaddr);
+void dwmac4_dma_start_tx(void __iomem *ioaddr);
+void dwmac4_dma_stop_tx(void __iomem *ioaddr);
+void dwmac4_dma_start_rx(void __iomem *ioaddr);
+void dwmac4_dma_stop_rx(void __iomem *ioaddr);
+int dwmac4_dma_interrupt(void __iomem *ioaddr,
+ struct stmmac_extra_stats *x);
+void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len);
+void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len);
+void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
+void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
+
+#endif /* __DWMAC4_DMA_H__ */
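
A stand-alone sketch, not part of the patch, that mirrors the per-channel register arithmetic from DMA_CHANX_BASE_ADDR() above; it prints the effective offsets for all eight channels the IP supports (the macro argument is parenthesized here purely for macro hygiene):

#include <stdio.h>

#define DMA_CHAN_BASE_ADDR	0x00001100
#define DMA_CHAN_BASE_OFFSET	0x80
#define DMA_CHANX_BASE_ADDR(x)	(DMA_CHAN_BASE_ADDR + ((x) * DMA_CHAN_BASE_OFFSET))
#define DMA_CHAN_TX_CONTROL(x)	(DMA_CHANX_BASE_ADDR(x) + 0x4)
#define DMA_CHAN_RX_CONTROL(x)	(DMA_CHANX_BASE_ADDR(x) + 0x8)

int main(void)
{
	int chan;

	/* dwmac4 accepts up to 8 channels; each block is 0x80 bytes wide */
	for (chan = 0; chan < 8; chan++)
		printf("chan %d: base 0x%04x tx_ctrl 0x%04x rx_ctrl 0x%04x\n",
		       chan, DMA_CHANX_BASE_ADDR(chan),
		       DMA_CHAN_TX_CONTROL(chan), DMA_CHAN_RX_CONTROL(chan));
	return 0;
}
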
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
new file mode 100644
index 000000000000..c7326d5b2f43
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -0,0 +1,225 @@
+/*
+ * Copyright (C) 2007-2015 STMicroelectronics Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@st.com>
+ */
+
+#include <linux/io.h>
+#include <linux/delay.h>
+#include "common.h"
+#include "dwmac4_dma.h"
+#include "dwmac4.h"
+
+int dwmac4_dma_reset(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_BUS_MODE);
+ int limit;
+
+ /* DMA SW reset */
+ value |= DMA_BUS_MODE_SFT_RESET;
+ writel(value, ioaddr + DMA_BUS_MODE);
+ limit = 10;
+ while (limit--) {
+ if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
+ break;
+ mdelay(10);
+ }
+
+ if (limit < 0)
+ return -EBUSY;
+
+ return 0;
+}
+
+void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
+{
+ writel(tail_ptr, ioaddr + DMA_CHAN_RX_END_ADDR(0));
+}
+
+void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
+{
+ writel(tail_ptr, ioaddr + DMA_CHAN_TX_END_ADDR(0));
+}
+
+void dwmac4_dma_start_tx(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
+
+ value |= DMA_CONTROL_ST;
+ writel(value, ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
+
+ value = readl(ioaddr + GMAC_CONFIG);
+ value |= GMAC_CONFIG_TE;
+ writel(value, ioaddr + GMAC_CONFIG);
+}
+
+void dwmac4_dma_stop_tx(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
+
+ value &= ~DMA_CONTROL_ST;
+ writel(value, ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
+
+ value = readl(ioaddr + GMAC_CONFIG);
+ value &= ~GMAC_CONFIG_TE;
+ writel(value, ioaddr + GMAC_CONFIG);
+}
+
+void dwmac4_dma_start_rx(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
+
+ value |= DMA_CONTROL_SR;
+
+ writel(value, ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
+
+ value = readl(ioaddr + GMAC_CONFIG);
+ value |= GMAC_CONFIG_RE;
+ writel(value, ioaddr + GMAC_CONFIG);
+}
+
+void dwmac4_dma_stop_rx(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
+
+ value &= ~DMA_CONTROL_SR;
+ writel(value, ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
+
+ value = readl(ioaddr + GMAC_CONFIG);
+ value &= ~GMAC_CONFIG_RE;
+ writel(value, ioaddr + GMAC_CONFIG);
+}
+
+void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len)
+{
+ writel(len, ioaddr + DMA_CHAN_TX_RING_LEN(STMMAC_CHAN0));
+}
+
+void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len)
+{
+ writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(STMMAC_CHAN0));
+}
+
+void dwmac4_enable_dma_irq(void __iomem *ioaddr)
+{
+ writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr +
+ DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
+}
+
+void dwmac410_enable_dma_irq(void __iomem *ioaddr)
+{
+ writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10,
+ ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
+}
+
+void dwmac4_disable_dma_irq(void __iomem *ioaddr)
+{
+ writel(0, ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
+}
+
+int dwmac4_dma_interrupt(void __iomem *ioaddr,
+ struct stmmac_extra_stats *x)
+{
+ int ret = 0;
+
+ u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(0));
+
+ /* ABNORMAL interrupts */
+ if (unlikely(intr_status & DMA_CHAN_STATUS_AIS)) {
+ if (unlikely(intr_status & DMA_CHAN_STATUS_RBU))
+ x->rx_buf_unav_irq++;
+ if (unlikely(intr_status & DMA_CHAN_STATUS_RPS))
+ x->rx_process_stopped_irq++;
+ if (unlikely(intr_status & DMA_CHAN_STATUS_RWT))
+ x->rx_watchdog_irq++;
+ if (unlikely(intr_status & DMA_CHAN_STATUS_ETI))
+ x->tx_early_irq++;
+ if (unlikely(intr_status & DMA_CHAN_STATUS_TPS)) {
+ x->tx_process_stopped_irq++;
+ ret = tx_hard_error;
+ }
+ if (unlikely(intr_status & DMA_CHAN_STATUS_FBE)) {
+ x->fatal_bus_error_irq++;
+ ret = tx_hard_error;
+ }
+ }
+ /* TX/RX NORMAL interrupts */
+ if (likely(intr_status & DMA_CHAN_STATUS_NIS)) {
+ x->normal_irq_n++;
+ if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
+ u32 value;
+
+ value = readl(ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
+ /* only schedule NAPI on a real RIE event */
+ if (likely(value & DMA_CHAN_INTR_ENA_RIE)) {
+ x->rx_normal_irq_n++;
+ ret |= handle_rx;
+ }
+ }
+ if (likely(intr_status & DMA_CHAN_STATUS_TI)) {
+ x->tx_normal_irq_n++;
+ ret |= handle_tx;
+ }
+ if (unlikely(intr_status & DMA_CHAN_STATUS_ERI))
+ x->rx_early_irq++;
+ }
+
+ /* Clear the interrupt by writing a logic 1 to the chanX interrupt
+ * status [21-0] except the reserved bits [5-3]
+ */
+ writel((intr_status & 0x3fffc7),
+ ioaddr + DMA_CHAN_STATUS(STMMAC_CHAN0));
+
+ return ret;
+}
+
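
The literal 0x3fffc7 used above to acknowledge the channel status can be derived from the comment: all status bits [21:0] minus the reserved bits [5:3]. A small stand-alone check, not part of the patch:

#include <assert.h>
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t all_status = (1u << 22) - 1;	/* bits [21:0] */
	uint32_t reserved   = 0x7u << 3;	/* bits [5:3]  */
	uint32_t w1c_mask   = all_status & ~reserved;

	assert(w1c_mask == 0x3fffc7);		/* matches the literal above */
	printf("write-1-to-clear mask = 0x%06x\n", w1c_mask);
	return 0;
}
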
+void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
+ unsigned int high, unsigned int low)
+{
+ unsigned long data;
+
+ data = (addr[5] << 8) | addr[4];
+ /* For MAC Addr registers we have to set the Address Enable (AE)
+ * bit that has no effect on the High Reg 0 where the bit 31 (MO)
+ * is RO.
+ */
+ data |= (STMMAC_CHAN0 << GMAC_HI_DCS_SHIFT);
+ writel(data | GMAC_HI_REG_AE, ioaddr + high);
+ data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+ writel(data, ioaddr + low);
+}
+
+/* Enable/disable MAC RX/TX */
+void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable)
+{
+ u32 value = readl(ioaddr + GMAC_CONFIG);
+
+ if (enable)
+ value |= GMAC_CONFIG_RE | GMAC_CONFIG_TE;
+ else
+ value &= ~(GMAC_CONFIG_TE | GMAC_CONFIG_RE);
+
+ writel(value, ioaddr + GMAC_CONFIG);
+}
+
+void stmmac_dwmac4_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+ unsigned int high, unsigned int low)
+{
+ unsigned int hi_addr, lo_addr;
+
+ /* Read the MAC address from the hardware */
+ hi_addr = readl(ioaddr + high);
+ lo_addr = readl(ioaddr + low);
+
+ /* Extract the MAC address from the high and low words */
+ addr[0] = lo_addr & 0xff;
+ addr[1] = (lo_addr >> 8) & 0xff;
+ addr[2] = (lo_addr >> 16) & 0xff;
+ addr[3] = (lo_addr >> 24) & 0xff;
+ addr[4] = hi_addr & 0xff;
+ addr[5] = (hi_addr >> 8) & 0xff;
+}
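
The two helpers above pack the six MAC address octets into a high word (bytes 4-5, plus the AE/DCS control bits) and a low word (bytes 0-3), and unpack them symmetrically. A stand-alone round-trip sketch, not part of the patch, with the control bits omitted:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
	uint8_t addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint8_t out[6];
	uint32_t hi, lo;

	/* pack as stmmac_dwmac4_set_mac_addr() does (AE/DCS bits left out) */
	hi = (addr[5] << 8) | addr[4];
	lo = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];

	/* unpack as stmmac_dwmac4_get_mac_addr() does */
	out[0] = lo & 0xff;
	out[1] = (lo >> 8) & 0xff;
	out[2] = (lo >> 16) & 0xff;
	out[3] = (lo >> 24) & 0xff;
	out[4] = hi & 0xff;
	out[5] = (hi >> 8) & 0xff;

	printf("round trip %s\n", memcmp(addr, out, 6) ? "FAILED" : "ok");
	return 0;
}
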
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index cfb018c7c5eb..38f19c99cf59 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -411,6 +411,26 @@ static int enh_desc_get_rx_timestamp_status(void *desc, u32 ats)
}
}
+static void enh_desc_display_ring(void *head, unsigned int size, bool rx)
+{
+ struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
+ int i;
+
+ pr_info("Extended %s descriptor ring:\n", rx ? "RX" : "TX");
+
+ for (i = 0; i < size; i++) {
+ u64 x;
+
+ x = *(u64 *)ep;
+ pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+ i, (unsigned int)virt_to_phys(ep),
+ (unsigned int)x, (unsigned int)(x >> 32),
+ ep->basic.des2, ep->basic.des3);
+ ep++;
+ }
+ pr_info("\n");
+}
+
const struct stmmac_desc_ops enh_desc_ops = {
.tx_status = enh_desc_get_tx_status,
.rx_status = enh_desc_get_rx_status,
@@ -430,4 +450,5 @@ const struct stmmac_desc_ops enh_desc_ops = {
.get_tx_timestamp_status = enh_desc_get_tx_timestamp_status,
.get_timestamp = enh_desc_get_timestamp,
.get_rx_timestamp_status = enh_desc_get_rx_timestamp_status,
+ .display_ring = enh_desc_display_ring,
};
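
With display_ring now exported through stmmac_desc_ops, the core no longer needs to know which descriptor flavour is in use; it simply calls the hook (see the stmmac_main.c hunks later in this patch). A simplified stand-in for that dispatch, not part of the patch:

#include <stdbool.h>
#include <stdio.h>

/* reduced stand-in for struct stmmac_desc_ops */
struct desc_ops {
	void (*display_ring)(void *head, unsigned int size, bool rx);
};

static void demo_display_ring(void *head, unsigned int size, bool rx)
{
	printf("%s ring at %p, %u entries\n", rx ? "RX" : "TX", head, size);
}

static const struct desc_ops demo_ops = { .display_ring = demo_display_ring };

int main(void)
{
	int ring[4];

	/* the core only sees the hook, never the descriptor layout */
	demo_ops.display_ring(ring, 4, true);
	demo_ops.display_ring(ring, 4, false);
	return 0;
}
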
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
index 192c2491330b..38a1a5603293 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc.h
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -35,6 +35,10 @@
* current value.*/
#define MMC_CNTRL_PRESET 0x10
#define MMC_CNTRL_FULL_HALF_PRESET 0x20
+
+#define MMC_GMAC4_OFFSET 0x700
+#define MMC_GMAC3_X_OFFSET 0x100
+
struct stmmac_counters {
unsigned int mmc_tx_octetcount_gb;
unsigned int mmc_tx_framecount_gb;
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index 3f20bb1fe570..ce9aa792857b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -28,12 +28,12 @@
/* MAC Management Counters register offset */
-#define MMC_CNTRL 0x00000100 /* MMC Control */
-#define MMC_RX_INTR 0x00000104 /* MMC RX Interrupt */
-#define MMC_TX_INTR 0x00000108 /* MMC TX Interrupt */
-#define MMC_RX_INTR_MASK 0x0000010c /* MMC Interrupt Mask */
-#define MMC_TX_INTR_MASK 0x00000110 /* MMC Interrupt Mask */
-#define MMC_DEFAULT_MASK 0xffffffff
+#define MMC_CNTRL 0x00 /* MMC Control */
+#define MMC_RX_INTR 0x04 /* MMC RX Interrupt */
+#define MMC_TX_INTR 0x08 /* MMC TX Interrupt */
+#define MMC_RX_INTR_MASK 0x0c /* MMC Interrupt Mask */
+#define MMC_TX_INTR_MASK 0x10 /* MMC Interrupt Mask */
+#define MMC_DEFAULT_MASK 0xffffffff
/* MMC TX counter registers */
@@ -41,115 +41,115 @@
* _GB register stands for good and bad frames
* _G is for good only.
*/
-#define MMC_TX_OCTETCOUNT_GB 0x00000114
-#define MMC_TX_FRAMECOUNT_GB 0x00000118
-#define MMC_TX_BROADCASTFRAME_G 0x0000011c
-#define MMC_TX_MULTICASTFRAME_G 0x00000120
-#define MMC_TX_64_OCTETS_GB 0x00000124
-#define MMC_TX_65_TO_127_OCTETS_GB 0x00000128
-#define MMC_TX_128_TO_255_OCTETS_GB 0x0000012c
-#define MMC_TX_256_TO_511_OCTETS_GB 0x00000130
-#define MMC_TX_512_TO_1023_OCTETS_GB 0x00000134
-#define MMC_TX_1024_TO_MAX_OCTETS_GB 0x00000138
-#define MMC_TX_UNICAST_GB 0x0000013c
-#define MMC_TX_MULTICAST_GB 0x00000140
-#define MMC_TX_BROADCAST_GB 0x00000144
-#define MMC_TX_UNDERFLOW_ERROR 0x00000148
-#define MMC_TX_SINGLECOL_G 0x0000014c
-#define MMC_TX_MULTICOL_G 0x00000150
-#define MMC_TX_DEFERRED 0x00000154
-#define MMC_TX_LATECOL 0x00000158
-#define MMC_TX_EXESSCOL 0x0000015c
-#define MMC_TX_CARRIER_ERROR 0x00000160
-#define MMC_TX_OCTETCOUNT_G 0x00000164
-#define MMC_TX_FRAMECOUNT_G 0x00000168
-#define MMC_TX_EXCESSDEF 0x0000016c
-#define MMC_TX_PAUSE_FRAME 0x00000170
-#define MMC_TX_VLAN_FRAME_G 0x00000174
+#define MMC_TX_OCTETCOUNT_GB 0x14
+#define MMC_TX_FRAMECOUNT_GB 0x18
+#define MMC_TX_BROADCASTFRAME_G 0x1c
+#define MMC_TX_MULTICASTFRAME_G 0x20
+#define MMC_TX_64_OCTETS_GB 0x24
+#define MMC_TX_65_TO_127_OCTETS_GB 0x28
+#define MMC_TX_128_TO_255_OCTETS_GB 0x2c
+#define MMC_TX_256_TO_511_OCTETS_GB 0x30
+#define MMC_TX_512_TO_1023_OCTETS_GB 0x34
+#define MMC_TX_1024_TO_MAX_OCTETS_GB 0x38
+#define MMC_TX_UNICAST_GB 0x3c
+#define MMC_TX_MULTICAST_GB 0x40
+#define MMC_TX_BROADCAST_GB 0x44
+#define MMC_TX_UNDERFLOW_ERROR 0x48
+#define MMC_TX_SINGLECOL_G 0x4c
+#define MMC_TX_MULTICOL_G 0x50
+#define MMC_TX_DEFERRED 0x54
+#define MMC_TX_LATECOL 0x58
+#define MMC_TX_EXESSCOL 0x5c
+#define MMC_TX_CARRIER_ERROR 0x60
+#define MMC_TX_OCTETCOUNT_G 0x64
+#define MMC_TX_FRAMECOUNT_G 0x68
+#define MMC_TX_EXCESSDEF 0x6c
+#define MMC_TX_PAUSE_FRAME 0x70
+#define MMC_TX_VLAN_FRAME_G 0x74
/* MMC RX counter registers */
-#define MMC_RX_FRAMECOUNT_GB 0x00000180
-#define MMC_RX_OCTETCOUNT_GB 0x00000184
-#define MMC_RX_OCTETCOUNT_G 0x00000188
-#define MMC_RX_BROADCASTFRAME_G 0x0000018c
-#define MMC_RX_MULTICASTFRAME_G 0x00000190
-#define MMC_RX_CRC_ERROR 0x00000194
-#define MMC_RX_ALIGN_ERROR 0x00000198
-#define MMC_RX_RUN_ERROR 0x0000019C
-#define MMC_RX_JABBER_ERROR 0x000001A0
-#define MMC_RX_UNDERSIZE_G 0x000001A4
-#define MMC_RX_OVERSIZE_G 0x000001A8
-#define MMC_RX_64_OCTETS_GB 0x000001AC
-#define MMC_RX_65_TO_127_OCTETS_GB 0x000001b0
-#define MMC_RX_128_TO_255_OCTETS_GB 0x000001b4
-#define MMC_RX_256_TO_511_OCTETS_GB 0x000001b8
-#define MMC_RX_512_TO_1023_OCTETS_GB 0x000001bc
-#define MMC_RX_1024_TO_MAX_OCTETS_GB 0x000001c0
-#define MMC_RX_UNICAST_G 0x000001c4
-#define MMC_RX_LENGTH_ERROR 0x000001c8
-#define MMC_RX_AUTOFRANGETYPE 0x000001cc
-#define MMC_RX_PAUSE_FRAMES 0x000001d0
-#define MMC_RX_FIFO_OVERFLOW 0x000001d4
-#define MMC_RX_VLAN_FRAMES_GB 0x000001d8
-#define MMC_RX_WATCHDOG_ERROR 0x000001dc
+#define MMC_RX_FRAMECOUNT_GB 0x80
+#define MMC_RX_OCTETCOUNT_GB 0x84
+#define MMC_RX_OCTETCOUNT_G 0x88
+#define MMC_RX_BROADCASTFRAME_G 0x8c
+#define MMC_RX_MULTICASTFRAME_G 0x90
+#define MMC_RX_CRC_ERROR 0x94
+#define MMC_RX_ALIGN_ERROR 0x98
+#define MMC_RX_RUN_ERROR 0x9C
+#define MMC_RX_JABBER_ERROR 0xA0
+#define MMC_RX_UNDERSIZE_G 0xA4
+#define MMC_RX_OVERSIZE_G 0xA8
+#define MMC_RX_64_OCTETS_GB 0xAC
+#define MMC_RX_65_TO_127_OCTETS_GB 0xb0
+#define MMC_RX_128_TO_255_OCTETS_GB 0xb4
+#define MMC_RX_256_TO_511_OCTETS_GB 0xb8
+#define MMC_RX_512_TO_1023_OCTETS_GB 0xbc
+#define MMC_RX_1024_TO_MAX_OCTETS_GB 0xc0
+#define MMC_RX_UNICAST_G 0xc4
+#define MMC_RX_LENGTH_ERROR 0xc8
+#define MMC_RX_AUTOFRANGETYPE 0xcc
+#define MMC_RX_PAUSE_FRAMES 0xd0
+#define MMC_RX_FIFO_OVERFLOW 0xd4
+#define MMC_RX_VLAN_FRAMES_GB 0xd8
+#define MMC_RX_WATCHDOG_ERROR 0xdc
/* IPC*/
-#define MMC_RX_IPC_INTR_MASK 0x00000200
-#define MMC_RX_IPC_INTR 0x00000208
+#define MMC_RX_IPC_INTR_MASK 0x100
+#define MMC_RX_IPC_INTR 0x108
/* IPv4*/
-#define MMC_RX_IPV4_GD 0x00000210
-#define MMC_RX_IPV4_HDERR 0x00000214
-#define MMC_RX_IPV4_NOPAY 0x00000218
-#define MMC_RX_IPV4_FRAG 0x0000021C
-#define MMC_RX_IPV4_UDSBL 0x00000220
+#define MMC_RX_IPV4_GD 0x110
+#define MMC_RX_IPV4_HDERR 0x114
+#define MMC_RX_IPV4_NOPAY 0x118
+#define MMC_RX_IPV4_FRAG 0x11C
+#define MMC_RX_IPV4_UDSBL 0x120
-#define MMC_RX_IPV4_GD_OCTETS 0x00000250
-#define MMC_RX_IPV4_HDERR_OCTETS 0x00000254
-#define MMC_RX_IPV4_NOPAY_OCTETS 0x00000258
-#define MMC_RX_IPV4_FRAG_OCTETS 0x0000025c
-#define MMC_RX_IPV4_UDSBL_OCTETS 0x00000260
+#define MMC_RX_IPV4_GD_OCTETS 0x150
+#define MMC_RX_IPV4_HDERR_OCTETS 0x154
+#define MMC_RX_IPV4_NOPAY_OCTETS 0x158
+#define MMC_RX_IPV4_FRAG_OCTETS 0x15c
+#define MMC_RX_IPV4_UDSBL_OCTETS 0x160
/* IPV6*/
-#define MMC_RX_IPV6_GD_OCTETS 0x00000264
-#define MMC_RX_IPV6_HDERR_OCTETS 0x00000268
-#define MMC_RX_IPV6_NOPAY_OCTETS 0x0000026c
+#define MMC_RX_IPV6_GD_OCTETS 0x164
+#define MMC_RX_IPV6_HDERR_OCTETS 0x168
+#define MMC_RX_IPV6_NOPAY_OCTETS 0x16c
-#define MMC_RX_IPV6_GD 0x00000224
-#define MMC_RX_IPV6_HDERR 0x00000228
-#define MMC_RX_IPV6_NOPAY 0x0000022c
+#define MMC_RX_IPV6_GD 0x124
+#define MMC_RX_IPV6_HDERR 0x128
+#define MMC_RX_IPV6_NOPAY 0x12c
/* Protocols*/
-#define MMC_RX_UDP_GD 0x00000230
-#define MMC_RX_UDP_ERR 0x00000234
-#define MMC_RX_TCP_GD 0x00000238
-#define MMC_RX_TCP_ERR 0x0000023c
-#define MMC_RX_ICMP_GD 0x00000240
-#define MMC_RX_ICMP_ERR 0x00000244
+#define MMC_RX_UDP_GD 0x130
+#define MMC_RX_UDP_ERR 0x134
+#define MMC_RX_TCP_GD 0x138
+#define MMC_RX_TCP_ERR 0x13c
+#define MMC_RX_ICMP_GD 0x140
+#define MMC_RX_ICMP_ERR 0x144
-#define MMC_RX_UDP_GD_OCTETS 0x00000270
-#define MMC_RX_UDP_ERR_OCTETS 0x00000274
-#define MMC_RX_TCP_GD_OCTETS 0x00000278
-#define MMC_RX_TCP_ERR_OCTETS 0x0000027c
-#define MMC_RX_ICMP_GD_OCTETS 0x00000280
-#define MMC_RX_ICMP_ERR_OCTETS 0x00000284
+#define MMC_RX_UDP_GD_OCTETS 0x170
+#define MMC_RX_UDP_ERR_OCTETS 0x174
+#define MMC_RX_TCP_GD_OCTETS 0x178
+#define MMC_RX_TCP_ERR_OCTETS 0x17c
+#define MMC_RX_ICMP_GD_OCTETS 0x180
+#define MMC_RX_ICMP_ERR_OCTETS 0x184
-void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
+void dwmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
{
- u32 value = readl(ioaddr + MMC_CNTRL);
+ u32 value = readl(mmcaddr + MMC_CNTRL);
value |= (mode & 0x3F);
- writel(value, ioaddr + MMC_CNTRL);
+ writel(value, mmcaddr + MMC_CNTRL);
pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
MMC_CNTRL, value);
}
 /* To mask all interrupts. */
-void dwmac_mmc_intr_all_mask(void __iomem *ioaddr)
+void dwmac_mmc_intr_all_mask(void __iomem *mmcaddr)
{
- writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK);
- writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK);
- writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_IPC_INTR_MASK);
+ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_INTR_MASK);
+ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_TX_INTR_MASK);
+ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_IPC_INTR_MASK);
}
 /* This reads the MAC core counters (if actually supported).
@@ -157,111 +157,116 @@ void dwmac_mmc_intr_all_mask(void __iomem *ioaddr)
 * counter after a read. So all the fields of the mmc struct
* have to be incremented.
*/
-void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc)
+void dwmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
{
- mmc->mmc_tx_octetcount_gb += readl(ioaddr + MMC_TX_OCTETCOUNT_GB);
- mmc->mmc_tx_framecount_gb += readl(ioaddr + MMC_TX_FRAMECOUNT_GB);
- mmc->mmc_tx_broadcastframe_g += readl(ioaddr + MMC_TX_BROADCASTFRAME_G);
- mmc->mmc_tx_multicastframe_g += readl(ioaddr + MMC_TX_MULTICASTFRAME_G);
- mmc->mmc_tx_64_octets_gb += readl(ioaddr + MMC_TX_64_OCTETS_GB);
+ mmc->mmc_tx_octetcount_gb += readl(mmcaddr + MMC_TX_OCTETCOUNT_GB);
+ mmc->mmc_tx_framecount_gb += readl(mmcaddr + MMC_TX_FRAMECOUNT_GB);
+ mmc->mmc_tx_broadcastframe_g += readl(mmcaddr +
+ MMC_TX_BROADCASTFRAME_G);
+ mmc->mmc_tx_multicastframe_g += readl(mmcaddr +
+ MMC_TX_MULTICASTFRAME_G);
+ mmc->mmc_tx_64_octets_gb += readl(mmcaddr + MMC_TX_64_OCTETS_GB);
mmc->mmc_tx_65_to_127_octets_gb +=
- readl(ioaddr + MMC_TX_65_TO_127_OCTETS_GB);
+ readl(mmcaddr + MMC_TX_65_TO_127_OCTETS_GB);
mmc->mmc_tx_128_to_255_octets_gb +=
- readl(ioaddr + MMC_TX_128_TO_255_OCTETS_GB);
+ readl(mmcaddr + MMC_TX_128_TO_255_OCTETS_GB);
mmc->mmc_tx_256_to_511_octets_gb +=
- readl(ioaddr + MMC_TX_256_TO_511_OCTETS_GB);
+ readl(mmcaddr + MMC_TX_256_TO_511_OCTETS_GB);
mmc->mmc_tx_512_to_1023_octets_gb +=
- readl(ioaddr + MMC_TX_512_TO_1023_OCTETS_GB);
+ readl(mmcaddr + MMC_TX_512_TO_1023_OCTETS_GB);
mmc->mmc_tx_1024_to_max_octets_gb +=
- readl(ioaddr + MMC_TX_1024_TO_MAX_OCTETS_GB);
- mmc->mmc_tx_unicast_gb += readl(ioaddr + MMC_TX_UNICAST_GB);
- mmc->mmc_tx_multicast_gb += readl(ioaddr + MMC_TX_MULTICAST_GB);
- mmc->mmc_tx_broadcast_gb += readl(ioaddr + MMC_TX_BROADCAST_GB);
- mmc->mmc_tx_underflow_error += readl(ioaddr + MMC_TX_UNDERFLOW_ERROR);
- mmc->mmc_tx_singlecol_g += readl(ioaddr + MMC_TX_SINGLECOL_G);
- mmc->mmc_tx_multicol_g += readl(ioaddr + MMC_TX_MULTICOL_G);
- mmc->mmc_tx_deferred += readl(ioaddr + MMC_TX_DEFERRED);
- mmc->mmc_tx_latecol += readl(ioaddr + MMC_TX_LATECOL);
- mmc->mmc_tx_exesscol += readl(ioaddr + MMC_TX_EXESSCOL);
- mmc->mmc_tx_carrier_error += readl(ioaddr + MMC_TX_CARRIER_ERROR);
- mmc->mmc_tx_octetcount_g += readl(ioaddr + MMC_TX_OCTETCOUNT_G);
- mmc->mmc_tx_framecount_g += readl(ioaddr + MMC_TX_FRAMECOUNT_G);
- mmc->mmc_tx_excessdef += readl(ioaddr + MMC_TX_EXCESSDEF);
- mmc->mmc_tx_pause_frame += readl(ioaddr + MMC_TX_PAUSE_FRAME);
- mmc->mmc_tx_vlan_frame_g += readl(ioaddr + MMC_TX_VLAN_FRAME_G);
+ readl(mmcaddr + MMC_TX_1024_TO_MAX_OCTETS_GB);
+ mmc->mmc_tx_unicast_gb += readl(mmcaddr + MMC_TX_UNICAST_GB);
+ mmc->mmc_tx_multicast_gb += readl(mmcaddr + MMC_TX_MULTICAST_GB);
+ mmc->mmc_tx_broadcast_gb += readl(mmcaddr + MMC_TX_BROADCAST_GB);
+ mmc->mmc_tx_underflow_error += readl(mmcaddr + MMC_TX_UNDERFLOW_ERROR);
+ mmc->mmc_tx_singlecol_g += readl(mmcaddr + MMC_TX_SINGLECOL_G);
+ mmc->mmc_tx_multicol_g += readl(mmcaddr + MMC_TX_MULTICOL_G);
+ mmc->mmc_tx_deferred += readl(mmcaddr + MMC_TX_DEFERRED);
+ mmc->mmc_tx_latecol += readl(mmcaddr + MMC_TX_LATECOL);
+ mmc->mmc_tx_exesscol += readl(mmcaddr + MMC_TX_EXESSCOL);
+ mmc->mmc_tx_carrier_error += readl(mmcaddr + MMC_TX_CARRIER_ERROR);
+ mmc->mmc_tx_octetcount_g += readl(mmcaddr + MMC_TX_OCTETCOUNT_G);
+ mmc->mmc_tx_framecount_g += readl(mmcaddr + MMC_TX_FRAMECOUNT_G);
+ mmc->mmc_tx_excessdef += readl(mmcaddr + MMC_TX_EXCESSDEF);
+ mmc->mmc_tx_pause_frame += readl(mmcaddr + MMC_TX_PAUSE_FRAME);
+ mmc->mmc_tx_vlan_frame_g += readl(mmcaddr + MMC_TX_VLAN_FRAME_G);
/* MMC RX counter registers */
- mmc->mmc_rx_framecount_gb += readl(ioaddr + MMC_RX_FRAMECOUNT_GB);
- mmc->mmc_rx_octetcount_gb += readl(ioaddr + MMC_RX_OCTETCOUNT_GB);
- mmc->mmc_rx_octetcount_g += readl(ioaddr + MMC_RX_OCTETCOUNT_G);
- mmc->mmc_rx_broadcastframe_g += readl(ioaddr + MMC_RX_BROADCASTFRAME_G);
- mmc->mmc_rx_multicastframe_g += readl(ioaddr + MMC_RX_MULTICASTFRAME_G);
- mmc->mmc_rx_crc_error += readl(ioaddr + MMC_RX_CRC_ERROR);
- mmc->mmc_rx_align_error += readl(ioaddr + MMC_RX_ALIGN_ERROR);
- mmc->mmc_rx_run_error += readl(ioaddr + MMC_RX_RUN_ERROR);
- mmc->mmc_rx_jabber_error += readl(ioaddr + MMC_RX_JABBER_ERROR);
- mmc->mmc_rx_undersize_g += readl(ioaddr + MMC_RX_UNDERSIZE_G);
- mmc->mmc_rx_oversize_g += readl(ioaddr + MMC_RX_OVERSIZE_G);
- mmc->mmc_rx_64_octets_gb += readl(ioaddr + MMC_RX_64_OCTETS_GB);
+ mmc->mmc_rx_framecount_gb += readl(mmcaddr + MMC_RX_FRAMECOUNT_GB);
+ mmc->mmc_rx_octetcount_gb += readl(mmcaddr + MMC_RX_OCTETCOUNT_GB);
+ mmc->mmc_rx_octetcount_g += readl(mmcaddr + MMC_RX_OCTETCOUNT_G);
+ mmc->mmc_rx_broadcastframe_g += readl(mmcaddr +
+ MMC_RX_BROADCASTFRAME_G);
+ mmc->mmc_rx_multicastframe_g += readl(mmcaddr +
+ MMC_RX_MULTICASTFRAME_G);
+ mmc->mmc_rx_crc_error += readl(mmcaddr + MMC_RX_CRC_ERROR);
+ mmc->mmc_rx_align_error += readl(mmcaddr + MMC_RX_ALIGN_ERROR);
+ mmc->mmc_rx_run_error += readl(mmcaddr + MMC_RX_RUN_ERROR);
+ mmc->mmc_rx_jabber_error += readl(mmcaddr + MMC_RX_JABBER_ERROR);
+ mmc->mmc_rx_undersize_g += readl(mmcaddr + MMC_RX_UNDERSIZE_G);
+ mmc->mmc_rx_oversize_g += readl(mmcaddr + MMC_RX_OVERSIZE_G);
+ mmc->mmc_rx_64_octets_gb += readl(mmcaddr + MMC_RX_64_OCTETS_GB);
mmc->mmc_rx_65_to_127_octets_gb +=
- readl(ioaddr + MMC_RX_65_TO_127_OCTETS_GB);
+ readl(mmcaddr + MMC_RX_65_TO_127_OCTETS_GB);
mmc->mmc_rx_128_to_255_octets_gb +=
- readl(ioaddr + MMC_RX_128_TO_255_OCTETS_GB);
+ readl(mmcaddr + MMC_RX_128_TO_255_OCTETS_GB);
mmc->mmc_rx_256_to_511_octets_gb +=
- readl(ioaddr + MMC_RX_256_TO_511_OCTETS_GB);
+ readl(mmcaddr + MMC_RX_256_TO_511_OCTETS_GB);
mmc->mmc_rx_512_to_1023_octets_gb +=
- readl(ioaddr + MMC_RX_512_TO_1023_OCTETS_GB);
+ readl(mmcaddr + MMC_RX_512_TO_1023_OCTETS_GB);
mmc->mmc_rx_1024_to_max_octets_gb +=
- readl(ioaddr + MMC_RX_1024_TO_MAX_OCTETS_GB);
- mmc->mmc_rx_unicast_g += readl(ioaddr + MMC_RX_UNICAST_G);
- mmc->mmc_rx_length_error += readl(ioaddr + MMC_RX_LENGTH_ERROR);
- mmc->mmc_rx_autofrangetype += readl(ioaddr + MMC_RX_AUTOFRANGETYPE);
- mmc->mmc_rx_pause_frames += readl(ioaddr + MMC_RX_PAUSE_FRAMES);
- mmc->mmc_rx_fifo_overflow += readl(ioaddr + MMC_RX_FIFO_OVERFLOW);
- mmc->mmc_rx_vlan_frames_gb += readl(ioaddr + MMC_RX_VLAN_FRAMES_GB);
- mmc->mmc_rx_watchdog_error += readl(ioaddr + MMC_RX_WATCHDOG_ERROR);
+ readl(mmcaddr + MMC_RX_1024_TO_MAX_OCTETS_GB);
+ mmc->mmc_rx_unicast_g += readl(mmcaddr + MMC_RX_UNICAST_G);
+ mmc->mmc_rx_length_error += readl(mmcaddr + MMC_RX_LENGTH_ERROR);
+ mmc->mmc_rx_autofrangetype += readl(mmcaddr + MMC_RX_AUTOFRANGETYPE);
+ mmc->mmc_rx_pause_frames += readl(mmcaddr + MMC_RX_PAUSE_FRAMES);
+ mmc->mmc_rx_fifo_overflow += readl(mmcaddr + MMC_RX_FIFO_OVERFLOW);
+ mmc->mmc_rx_vlan_frames_gb += readl(mmcaddr + MMC_RX_VLAN_FRAMES_GB);
+ mmc->mmc_rx_watchdog_error += readl(mmcaddr + MMC_RX_WATCHDOG_ERROR);
/* IPC */
- mmc->mmc_rx_ipc_intr_mask += readl(ioaddr + MMC_RX_IPC_INTR_MASK);
- mmc->mmc_rx_ipc_intr += readl(ioaddr + MMC_RX_IPC_INTR);
+ mmc->mmc_rx_ipc_intr_mask += readl(mmcaddr + MMC_RX_IPC_INTR_MASK);
+ mmc->mmc_rx_ipc_intr += readl(mmcaddr + MMC_RX_IPC_INTR);
/* IPv4 */
- mmc->mmc_rx_ipv4_gd += readl(ioaddr + MMC_RX_IPV4_GD);
- mmc->mmc_rx_ipv4_hderr += readl(ioaddr + MMC_RX_IPV4_HDERR);
- mmc->mmc_rx_ipv4_nopay += readl(ioaddr + MMC_RX_IPV4_NOPAY);
- mmc->mmc_rx_ipv4_frag += readl(ioaddr + MMC_RX_IPV4_FRAG);
- mmc->mmc_rx_ipv4_udsbl += readl(ioaddr + MMC_RX_IPV4_UDSBL);
+ mmc->mmc_rx_ipv4_gd += readl(mmcaddr + MMC_RX_IPV4_GD);
+ mmc->mmc_rx_ipv4_hderr += readl(mmcaddr + MMC_RX_IPV4_HDERR);
+ mmc->mmc_rx_ipv4_nopay += readl(mmcaddr + MMC_RX_IPV4_NOPAY);
+ mmc->mmc_rx_ipv4_frag += readl(mmcaddr + MMC_RX_IPV4_FRAG);
+ mmc->mmc_rx_ipv4_udsbl += readl(mmcaddr + MMC_RX_IPV4_UDSBL);
- mmc->mmc_rx_ipv4_gd_octets += readl(ioaddr + MMC_RX_IPV4_GD_OCTETS);
+ mmc->mmc_rx_ipv4_gd_octets += readl(mmcaddr + MMC_RX_IPV4_GD_OCTETS);
mmc->mmc_rx_ipv4_hderr_octets +=
- readl(ioaddr + MMC_RX_IPV4_HDERR_OCTETS);
+ readl(mmcaddr + MMC_RX_IPV4_HDERR_OCTETS);
mmc->mmc_rx_ipv4_nopay_octets +=
- readl(ioaddr + MMC_RX_IPV4_NOPAY_OCTETS);
- mmc->mmc_rx_ipv4_frag_octets += readl(ioaddr + MMC_RX_IPV4_FRAG_OCTETS);
+ readl(mmcaddr + MMC_RX_IPV4_NOPAY_OCTETS);
+ mmc->mmc_rx_ipv4_frag_octets += readl(mmcaddr +
+ MMC_RX_IPV4_FRAG_OCTETS);
mmc->mmc_rx_ipv4_udsbl_octets +=
- readl(ioaddr + MMC_RX_IPV4_UDSBL_OCTETS);
+ readl(mmcaddr + MMC_RX_IPV4_UDSBL_OCTETS);
/* IPV6 */
- mmc->mmc_rx_ipv6_gd_octets += readl(ioaddr + MMC_RX_IPV6_GD_OCTETS);
+ mmc->mmc_rx_ipv6_gd_octets += readl(mmcaddr + MMC_RX_IPV6_GD_OCTETS);
mmc->mmc_rx_ipv6_hderr_octets +=
- readl(ioaddr + MMC_RX_IPV6_HDERR_OCTETS);
+ readl(mmcaddr + MMC_RX_IPV6_HDERR_OCTETS);
mmc->mmc_rx_ipv6_nopay_octets +=
- readl(ioaddr + MMC_RX_IPV6_NOPAY_OCTETS);
+ readl(mmcaddr + MMC_RX_IPV6_NOPAY_OCTETS);
- mmc->mmc_rx_ipv6_gd += readl(ioaddr + MMC_RX_IPV6_GD);
- mmc->mmc_rx_ipv6_hderr += readl(ioaddr + MMC_RX_IPV6_HDERR);
- mmc->mmc_rx_ipv6_nopay += readl(ioaddr + MMC_RX_IPV6_NOPAY);
+ mmc->mmc_rx_ipv6_gd += readl(mmcaddr + MMC_RX_IPV6_GD);
+ mmc->mmc_rx_ipv6_hderr += readl(mmcaddr + MMC_RX_IPV6_HDERR);
+ mmc->mmc_rx_ipv6_nopay += readl(mmcaddr + MMC_RX_IPV6_NOPAY);
/* Protocols */
- mmc->mmc_rx_udp_gd += readl(ioaddr + MMC_RX_UDP_GD);
- mmc->mmc_rx_udp_err += readl(ioaddr + MMC_RX_UDP_ERR);
- mmc->mmc_rx_tcp_gd += readl(ioaddr + MMC_RX_TCP_GD);
- mmc->mmc_rx_tcp_err += readl(ioaddr + MMC_RX_TCP_ERR);
- mmc->mmc_rx_icmp_gd += readl(ioaddr + MMC_RX_ICMP_GD);
- mmc->mmc_rx_icmp_err += readl(ioaddr + MMC_RX_ICMP_ERR);
+ mmc->mmc_rx_udp_gd += readl(mmcaddr + MMC_RX_UDP_GD);
+ mmc->mmc_rx_udp_err += readl(mmcaddr + MMC_RX_UDP_ERR);
+ mmc->mmc_rx_tcp_gd += readl(mmcaddr + MMC_RX_TCP_GD);
+ mmc->mmc_rx_tcp_err += readl(mmcaddr + MMC_RX_TCP_ERR);
+ mmc->mmc_rx_icmp_gd += readl(mmcaddr + MMC_RX_ICMP_GD);
+ mmc->mmc_rx_icmp_err += readl(mmcaddr + MMC_RX_ICMP_ERR);
- mmc->mmc_rx_udp_gd_octets += readl(ioaddr + MMC_RX_UDP_GD_OCTETS);
- mmc->mmc_rx_udp_err_octets += readl(ioaddr + MMC_RX_UDP_ERR_OCTETS);
- mmc->mmc_rx_tcp_gd_octets += readl(ioaddr + MMC_RX_TCP_GD_OCTETS);
- mmc->mmc_rx_tcp_err_octets += readl(ioaddr + MMC_RX_TCP_ERR_OCTETS);
- mmc->mmc_rx_icmp_gd_octets += readl(ioaddr + MMC_RX_ICMP_GD_OCTETS);
- mmc->mmc_rx_icmp_err_octets += readl(ioaddr + MMC_RX_ICMP_ERR_OCTETS);
+ mmc->mmc_rx_udp_gd_octets += readl(mmcaddr + MMC_RX_UDP_GD_OCTETS);
+ mmc->mmc_rx_udp_err_octets += readl(mmcaddr + MMC_RX_UDP_ERR_OCTETS);
+ mmc->mmc_rx_tcp_gd_octets += readl(mmcaddr + MMC_RX_TCP_GD_OCTETS);
+ mmc->mmc_rx_tcp_err_octets += readl(mmcaddr + MMC_RX_TCP_ERR_OCTETS);
+ mmc->mmc_rx_icmp_gd_octets += readl(mmcaddr + MMC_RX_ICMP_GD_OCTETS);
+ mmc->mmc_rx_icmp_err_octets += readl(mmcaddr + MMC_RX_ICMP_ERR_OCTETS);
}
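
The rebasing above turns the MMC register defines into offsets from a per-variant base (priv->mmcaddr), so the same accessors serve both GMAC3.x (base 0x100) and GMAC4 (base 0x700). A stand-alone check, not part of the patch, that the rebased offsets reproduce the old absolute ones on GMAC3.x:

#include <assert.h>
#include <stdio.h>

#define MMC_GMAC4_OFFSET	0x700
#define MMC_GMAC3_X_OFFSET	0x100
#define MMC_CNTRL		0x00	/* now relative to the MMC base */
#define MMC_RX_INTR		0x04

int main(void)
{
	/* GMAC3.x: base + relative offset equals the old absolute offset */
	assert(MMC_GMAC3_X_OFFSET + MMC_CNTRL   == 0x100);
	assert(MMC_GMAC3_X_OFFSET + MMC_RX_INTR == 0x104);

	/* GMAC4 reuses the same relative layout from a different base */
	printf("GMAC4 MMC_CNTRL at 0x%x\n", MMC_GMAC4_OFFSET + MMC_CNTRL);
	return 0;
}
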
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index 011386f6f24d..2beacd0d3043 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -279,6 +279,26 @@ static int ndesc_get_rx_timestamp_status(void *desc, u32 ats)
return 1;
}
+static void ndesc_display_ring(void *head, unsigned int size, bool rx)
+{
+ struct dma_desc *p = (struct dma_desc *)head;
+ int i;
+
+ pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");
+
+ for (i = 0; i < size; i++) {
+ u64 x;
+
+ x = *(u64 *)p;
+ pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
+ i, (unsigned int)virt_to_phys(p),
+ (unsigned int)x, (unsigned int)(x >> 32),
+ p->des2, p->des3);
+ p++;
+ }
+ pr_info("\n");
+}
+
const struct stmmac_desc_ops ndesc_ops = {
.tx_status = ndesc_get_tx_status,
.rx_status = ndesc_get_rx_status,
@@ -297,4 +317,5 @@ const struct stmmac_desc_ops ndesc_ops = {
.get_tx_timestamp_status = ndesc_get_tx_timestamp_status,
.get_timestamp = ndesc_get_timestamp,
.get_rx_timestamp_status = ndesc_get_rx_timestamp_status,
+ .display_ring = ndesc_display_ring,
};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 8bbab97895fe..8dc9056c1001 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -24,7 +24,7 @@
#define __STMMAC_H__
#define STMMAC_RESOURCE_NAME "stmmaceth"
-#define DRV_MODULE_VERSION "Oct_2015"
+#define DRV_MODULE_VERSION "Jan_2016"
#include <linux/clk.h>
#include <linux/stmmac.h>
@@ -67,6 +67,7 @@ struct stmmac_priv {
spinlock_t tx_lock;
bool tx_path_in_lpi_mode;
struct timer_list txtimer;
+ bool tso;
struct dma_desc *dma_rx ____cacheline_aligned_in_smp;
struct dma_extended_desc *dma_erx;
@@ -116,7 +117,6 @@ struct stmmac_priv {
int eee_enabled;
int eee_active;
int tx_lpi_timer;
- int pcs;
unsigned int mode;
int extend_desc;
struct ptp_clock *ptp_clock;
@@ -128,6 +128,10 @@ struct stmmac_priv {
int use_riwt;
int irq_wake;
spinlock_t ptp_lock;
+ void __iomem *mmcaddr;
+ u32 rx_tail_addr;
+ u32 tx_tail_addr;
+ u32 mss;
#ifdef CONFIG_DEBUG_FS
struct dentry *dbgfs_dir;
@@ -143,9 +147,9 @@ void stmmac_set_ethtool_ops(struct net_device *netdev);
int stmmac_ptp_register(struct stmmac_priv *priv);
void stmmac_ptp_unregister(struct stmmac_priv *priv);
-int stmmac_resume(struct net_device *ndev);
-int stmmac_suspend(struct net_device *ndev);
-int stmmac_dvr_remove(struct net_device *ndev);
+int stmmac_resume(struct device *dev);
+int stmmac_suspend(struct device *dev);
+int stmmac_dvr_remove(struct device *dev);
int stmmac_dvr_probe(struct device *device,
struct plat_stmmacenet_data *plat_dat,
struct stmmac_resources *res);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 3c7928edfebb..1e06173fc9d7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -161,6 +161,9 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
STMMAC_STAT(mtl_rx_fifo_ctrl_active),
STMMAC_STAT(mac_rx_frame_ctrl_fifo),
STMMAC_STAT(mac_gmii_rx_proto_engine),
+ /* TSO */
+ STMMAC_STAT(tx_tso_frames),
+ STMMAC_STAT(tx_tso_nfrags),
};
#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)
@@ -273,7 +276,8 @@ static int stmmac_ethtool_getsettings(struct net_device *dev,
struct phy_device *phy = priv->phydev;
int rc;
- if ((priv->pcs & STMMAC_PCS_RGMII) || (priv->pcs & STMMAC_PCS_SGMII)) {
+ if (priv->hw->pcs & STMMAC_PCS_RGMII ||
+ priv->hw->pcs & STMMAC_PCS_SGMII) {
struct rgmii_adv adv;
if (!priv->xstats.pcs_link) {
@@ -286,10 +290,10 @@ static int stmmac_ethtool_getsettings(struct net_device *dev,
ethtool_cmd_speed_set(cmd, priv->xstats.pcs_speed);
/* Get and convert ADV/LP_ADV from the HW AN registers */
- if (!priv->hw->mac->get_adv)
+ if (!priv->hw->mac->pcs_get_adv_lp)
return -EOPNOTSUPP; /* should never happen indeed */
- priv->hw->mac->get_adv(priv->hw, &adv);
+ priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv);
/* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */
@@ -358,7 +362,8 @@ static int stmmac_ethtool_setsettings(struct net_device *dev,
struct phy_device *phy = priv->phydev;
int rc;
- if ((priv->pcs & STMMAC_PCS_RGMII) || (priv->pcs & STMMAC_PCS_SGMII)) {
+ if (priv->hw->pcs & STMMAC_PCS_RGMII ||
+ priv->hw->pcs & STMMAC_PCS_SGMII) {
u32 mask = ADVERTISED_Autoneg | ADVERTISED_Pause;
/* Only support ANE */
@@ -373,8 +378,11 @@ static int stmmac_ethtool_setsettings(struct net_device *dev,
ADVERTISED_10baseT_Full);
spin_lock(&priv->lock);
- if (priv->hw->mac->ctrl_ane)
- priv->hw->mac->ctrl_ane(priv->hw, 1);
+
+ if (priv->hw->mac->pcs_ctrl_ane)
+ priv->hw->mac->pcs_ctrl_ane(priv->ioaddr, 1,
+ priv->hw->ps, 0);
+
spin_unlock(&priv->lock);
return 0;
@@ -449,11 +457,22 @@ stmmac_get_pauseparam(struct net_device *netdev,
{
struct stmmac_priv *priv = netdev_priv(netdev);
- if (priv->pcs) /* FIXME */
- return;
-
pause->rx_pause = 0;
pause->tx_pause = 0;
+
+ if (priv->hw->pcs && priv->hw->mac->pcs_get_adv_lp) {
+ struct rgmii_adv adv_lp;
+
+ pause->autoneg = 1;
+ priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv_lp);
+ if (!adv_lp.pause)
+ return;
+ } else {
+ if (!(priv->phydev->supported & SUPPORTED_Pause) ||
+ !(priv->phydev->supported & SUPPORTED_Asym_Pause))
+ return;
+ }
+
pause->autoneg = priv->phydev->autoneg;
if (priv->flow_ctrl & FLOW_RX)
@@ -470,10 +489,19 @@ stmmac_set_pauseparam(struct net_device *netdev,
struct stmmac_priv *priv = netdev_priv(netdev);
struct phy_device *phy = priv->phydev;
int new_pause = FLOW_OFF;
- int ret = 0;
- if (priv->pcs) /* FIXME */
- return -EOPNOTSUPP;
+ if (priv->hw->pcs && priv->hw->mac->pcs_get_adv_lp) {
+ struct rgmii_adv adv_lp;
+
+ pause->autoneg = 1;
+ priv->hw->mac->pcs_get_adv_lp(priv->ioaddr, &adv_lp);
+ if (!adv_lp.pause)
+ return -EOPNOTSUPP;
+ } else {
+ if (!(phy->supported & SUPPORTED_Pause) ||
+ !(phy->supported & SUPPORTED_Asym_Pause))
+ return -EOPNOTSUPP;
+ }
if (pause->rx_pause)
new_pause |= FLOW_RX;
@@ -485,11 +513,12 @@ stmmac_set_pauseparam(struct net_device *netdev,
if (phy->autoneg) {
if (netif_running(netdev))
- ret = phy_start_aneg(phy);
- } else
- priv->hw->mac->flow_ctrl(priv->hw, phy->duplex,
- priv->flow_ctrl, priv->pause);
- return ret;
+ return phy_start_aneg(phy);
+ }
+
+ priv->hw->mac->flow_ctrl(priv->hw, phy->duplex, priv->flow_ctrl,
+ priv->pause);
+ return 0;
}
static void stmmac_get_ethtool_stats(struct net_device *dev,
@@ -499,14 +528,14 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
int i, j = 0;
/* Update the DMA HW counters for dwmac10/100 */
- if (!priv->plat->has_gmac)
+ if (priv->hw->dma->dma_diagnostic_fr)
priv->hw->dma->dma_diagnostic_fr(&dev->stats,
(void *) &priv->xstats,
priv->ioaddr);
else {
/* If supported, for new GMAC chips expose the MMC counters */
if (priv->dma_cap.rmon) {
- dwmac_mmc_read(priv->ioaddr, &priv->mmc);
+ dwmac_mmc_read(priv->mmcaddr, &priv->mmc);
for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) {
char *p;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index fcbd4be562e2..4c8c60af7985 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -56,6 +56,7 @@
#include "dwmac1000.h"
#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
+#define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
/* Module parameters */
#define TX_TIMEO 5000
@@ -284,8 +285,9 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
 /* Using PCS we cannot deal with the phy registers at this stage
 * so we do not support extra features like EEE.
*/
- if ((priv->pcs == STMMAC_PCS_RGMII) || (priv->pcs == STMMAC_PCS_TBI) ||
- (priv->pcs == STMMAC_PCS_RTBI))
+ if ((priv->hw->pcs == STMMAC_PCS_RGMII) ||
+ (priv->hw->pcs == STMMAC_PCS_TBI) ||
+ (priv->hw->pcs == STMMAC_PCS_RTBI))
goto out;
/* MAC core supports the EEE feature. */
@@ -721,13 +723,15 @@ static void stmmac_adjust_link(struct net_device *dev)
new_state = 1;
switch (phydev->speed) {
case 1000:
- if (likely(priv->plat->has_gmac))
+ if (likely((priv->plat->has_gmac) ||
+ (priv->plat->has_gmac4)))
ctrl &= ~priv->hw->link.port;
stmmac_hw_fix_mac_speed(priv);
break;
case 100:
case 10:
- if (priv->plat->has_gmac) {
+ if (likely((priv->plat->has_gmac) ||
+ (priv->plat->has_gmac4))) {
ctrl |= priv->hw->link.port;
if (phydev->speed == SPEED_100) {
ctrl |= priv->hw->link.speed;
@@ -796,10 +800,10 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
(interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
(interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
pr_debug("STMMAC: PCS RGMII support enable\n");
- priv->pcs = STMMAC_PCS_RGMII;
+ priv->hw->pcs = STMMAC_PCS_RGMII;
} else if (interface == PHY_INTERFACE_MODE_SGMII) {
pr_debug("STMMAC: PCS SGMII support enable\n");
- priv->pcs = STMMAC_PCS_SGMII;
+ priv->hw->pcs = STMMAC_PCS_SGMII;
}
}
}
@@ -875,53 +879,22 @@ static int stmmac_init_phy(struct net_device *dev)
return 0;
}
-/**
- * stmmac_display_ring - display ring
- * @head: pointer to the head of the ring passed.
- * @size: size of the ring.
- * @extend_desc: to verify if extended descriptors are used.
- * Description: display the control/status and buffer descriptors.
- */
-static void stmmac_display_ring(void *head, int size, int extend_desc)
-{
- int i;
- struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
- struct dma_desc *p = (struct dma_desc *)head;
-
- for (i = 0; i < size; i++) {
- u64 x;
- if (extend_desc) {
- x = *(u64 *) ep;
- pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
- i, (unsigned int)virt_to_phys(ep),
- (unsigned int)x, (unsigned int)(x >> 32),
- ep->basic.des2, ep->basic.des3);
- ep++;
- } else {
- x = *(u64 *) p;
- pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
- i, (unsigned int)virt_to_phys(p),
- (unsigned int)x, (unsigned int)(x >> 32),
- p->des2, p->des3);
- p++;
- }
- pr_info("\n");
- }
-}
-
static void stmmac_display_rings(struct stmmac_priv *priv)
{
+ void *head_rx, *head_tx;
+
if (priv->extend_desc) {
- pr_info("Extended RX descriptor ring:\n");
- stmmac_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1);
- pr_info("Extended TX descriptor ring:\n");
- stmmac_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1);
+ head_rx = (void *)priv->dma_erx;
+ head_tx = (void *)priv->dma_etx;
} else {
- pr_info("RX descriptor ring:\n");
- stmmac_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0);
- pr_info("TX descriptor ring:\n");
- stmmac_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0);
+ head_rx = (void *)priv->dma_rx;
+ head_tx = (void *)priv->dma_tx;
}
+
+ /* Display Rx ring */
+ priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
+ /* Display Tx ring */
+ priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
}
static int stmmac_set_bfsize(int mtu, int bufsize)
@@ -1000,7 +973,10 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
return -EINVAL;
}
- p->des2 = priv->rx_skbuff_dma[i];
+ if (priv->synopsys_id >= DWMAC_CORE_4_00)
+ p->des0 = priv->rx_skbuff_dma[i];
+ else
+ p->des2 = priv->rx_skbuff_dma[i];
if ((priv->hw->mode->init_desc3) &&
(priv->dma_buf_sz == BUF_SIZE_16KiB))
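
From this hunk onwards the buffer address is written to des0 on GMAC4 cores and to des2 on older ones. A stand-alone sketch of that version split, not part of the patch; the DWMAC_CORE_4_00 value is assumed here, the real constant lives in common.h:

#include <stdint.h>
#include <stdio.h>

#define DWMAC_CORE_4_00	0x40	/* assumed for illustration */

struct dma_desc {
	uint32_t des0, des1, des2, des3;
};

/* GMAC4 descriptors carry the buffer address in des0, older cores in des2 */
static void set_buf_addr(struct dma_desc *p, uint32_t synopsys_id,
			 uint32_t dma_addr)
{
	if (synopsys_id >= DWMAC_CORE_4_00)
		p->des0 = dma_addr;
	else
		p->des2 = dma_addr;
}

int main(void)
{
	struct dma_desc d = { 0 };

	set_buf_addr(&d, 0x41, 0xdeadb000u);	/* hypothetical GMAC4 core */
	printf("gmac4: des0=0x%08x des2=0x%08x\n", d.des0, d.des2);
	return 0;
}
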
@@ -1091,7 +1067,16 @@ static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
p = &((priv->dma_etx + i)->basic);
else
p = priv->dma_tx + i;
- p->des2 = 0;
+
+ if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = 0;
+ p->des3 = 0;
+ } else {
+ p->des2 = 0;
+ }
+
priv->tx_skbuff_dma[i].buf = 0;
priv->tx_skbuff_dma[i].map_as_page = false;
priv->tx_skbuff_dma[i].len = 0;
@@ -1354,9 +1339,13 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
priv->tx_skbuff_dma[entry].len,
DMA_TO_DEVICE);
priv->tx_skbuff_dma[entry].buf = 0;
+ priv->tx_skbuff_dma[entry].len = 0;
priv->tx_skbuff_dma[entry].map_as_page = false;
}
- priv->hw->mode->clean_desc3(priv, p);
+
+ if (priv->hw->mode->clean_desc3)
+ priv->hw->mode->clean_desc3(priv, p);
+
priv->tx_skbuff_dma[entry].last_segment = false;
priv->tx_skbuff_dma[entry].is_jumbo = false;
@@ -1479,41 +1468,23 @@ static void stmmac_dma_interrupt(struct stmmac_priv *priv)
static void stmmac_mmc_setup(struct stmmac_priv *priv)
{
unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
- MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
+ MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
+
+ if (priv->synopsys_id >= DWMAC_CORE_4_00)
+ priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
+ else
+ priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
- dwmac_mmc_intr_all_mask(priv->ioaddr);
+ dwmac_mmc_intr_all_mask(priv->mmcaddr);
if (priv->dma_cap.rmon) {
- dwmac_mmc_ctrl(priv->ioaddr, mode);
+ dwmac_mmc_ctrl(priv->mmcaddr, mode);
memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
} else
pr_info(" No MAC Management Counters available\n");
}
/**
- * stmmac_get_synopsys_id - return the SYINID.
- * @priv: driver private structure
- * Description: this simple function is to decode and return the SYINID
- * starting from the HW core register.
- */
-static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
-{
- u32 hwid = priv->hw->synopsys_uid;
-
- /* Check Synopsys Id (not available on old chips) */
- if (likely(hwid)) {
- u32 uid = ((hwid & 0x0000ff00) >> 8);
- u32 synid = (hwid & 0x000000ff);
-
- pr_info("stmmac - user ID: 0x%x, Synopsys ID: 0x%x\n",
- uid, synid);
-
- return synid;
- }
- return 0;
-}
-
-/**
* stmmac_selec_desc_mode - to select among: normal/alternate/extend descriptors
* @priv: driver private structure
* Description: select the Enhanced/Alternate or Normal descriptors.
@@ -1550,51 +1521,15 @@ static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
*/
static int stmmac_get_hw_features(struct stmmac_priv *priv)
{
- u32 hw_cap = 0;
+ u32 ret = 0;
if (priv->hw->dma->get_hw_feature) {
- hw_cap = priv->hw->dma->get_hw_feature(priv->ioaddr);
-
- priv->dma_cap.mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL);
- priv->dma_cap.mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
- priv->dma_cap.half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2;
- priv->dma_cap.hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4;
- priv->dma_cap.multi_addr = (hw_cap & DMA_HW_FEAT_ADDMAC) >> 5;
- priv->dma_cap.pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6;
- priv->dma_cap.sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8;
- priv->dma_cap.pmt_remote_wake_up =
- (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9;
- priv->dma_cap.pmt_magic_frame =
- (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10;
- /* MMC */
- priv->dma_cap.rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11;
- /* IEEE 1588-2002 */
- priv->dma_cap.time_stamp =
- (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12;
- /* IEEE 1588-2008 */
- priv->dma_cap.atime_stamp =
- (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13;
- /* 802.3az - Energy-Efficient Ethernet (EEE) */
- priv->dma_cap.eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14;
- priv->dma_cap.av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15;
- /* TX and RX csum */
- priv->dma_cap.tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16;
- priv->dma_cap.rx_coe_type1 =
- (hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17;
- priv->dma_cap.rx_coe_type2 =
- (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18;
- priv->dma_cap.rxfifo_over_2048 =
- (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19;
- /* TX and RX number of channels */
- priv->dma_cap.number_rx_channel =
- (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20;
- priv->dma_cap.number_tx_channel =
- (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22;
- /* Alternate (enhanced) DESC mode */
- priv->dma_cap.enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
- }
-
- return hw_cap;
+ priv->hw->dma->get_hw_feature(priv->ioaddr,
+ &priv->dma_cap);
+ ret = 1;
+ }
+
+ return ret;
}
/**
@@ -1650,8 +1585,19 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst,
aal, priv->dma_tx_phy, priv->dma_rx_phy, atds);
- if ((priv->synopsys_id >= DWMAC_CORE_3_50) &&
- (priv->plat->axi && priv->hw->dma->axi))
+ if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+ priv->rx_tail_addr = priv->dma_rx_phy +
+ (DMA_RX_SIZE * sizeof(struct dma_desc));
+ priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, priv->rx_tail_addr,
+ STMMAC_CHAN0);
+
+ priv->tx_tail_addr = priv->dma_tx_phy +
+ (DMA_TX_SIZE * sizeof(struct dma_desc));
+ priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
+ STMMAC_CHAN0);
+ }
+
+ if (priv->plat->axi && priv->hw->dma->axi)
priv->hw->dma->axi(priv->ioaddr, priv->plat->axi);
return ret;
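
The tail pointers programmed above point one descriptor past the end of each ring (base + ring size * descriptor size). A stand-alone sketch of the arithmetic, not part of the patch; the ring size and base address are assumed values for illustration:

#include <stdint.h>
#include <stdio.h>

#define DMA_RX_SIZE	512	/* assumed ring size */

struct dma_desc { uint32_t des0, des1, des2, des3; };

int main(void)
{
	uint32_t dma_rx_phy = 0x10000000u;	/* hypothetical ring base */
	uint32_t rx_tail = dma_rx_phy + DMA_RX_SIZE * sizeof(struct dma_desc);

	/* the tail lands one descriptor past the last ring entry */
	printf("ring 0x%08x..0x%08x tail 0x%08x\n", dma_rx_phy,
	       (uint32_t)(dma_rx_phy +
			  (DMA_RX_SIZE - 1) * sizeof(struct dma_desc)),
	       rx_tail);
	return 0;
}
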
@@ -1720,6 +1666,19 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
if (priv->plat->bus_setup)
priv->plat->bus_setup(priv->ioaddr);
+ /* PS and related bits will be programmed according to the speed */
+ if (priv->hw->pcs) {
+ int speed = priv->plat->mac_port_sel_speed;
+
+ if ((speed == SPEED_10) || (speed == SPEED_100) ||
+ (speed == SPEED_1000)) {
+ priv->hw->ps = speed;
+ } else {
+ dev_warn(priv->device, "invalid port speed\n");
+ priv->hw->ps = 0;
+ }
+ }
+
/* Initialize the MAC Core */
priv->hw->mac->core_init(priv->hw, dev->mtu);
@@ -1731,7 +1690,10 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
}
/* Enable the MAC Rx/Tx */
- stmmac_set_mac(priv->ioaddr, true);
+ if (priv->synopsys_id >= DWMAC_CORE_4_00)
+ stmmac_dwmac4_set_mac(priv->ioaddr, true);
+ else
+ stmmac_set_mac(priv->ioaddr, true);
/* Set the HW DMA mode and the COE */
stmmac_dma_operation_mode(priv);
@@ -1766,8 +1728,20 @@ static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
}
- if (priv->pcs && priv->hw->mac->ctrl_ane)
- priv->hw->mac->ctrl_ane(priv->hw, 0);
+ if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
+ priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
+
+ /* set TX ring length */
+ if (priv->hw->dma->set_tx_ring_len)
+ priv->hw->dma->set_tx_ring_len(priv->ioaddr,
+ (DMA_TX_SIZE - 1));
+ /* set RX ring length */
+ if (priv->hw->dma->set_rx_ring_len)
+ priv->hw->dma->set_rx_ring_len(priv->ioaddr,
+ (DMA_RX_SIZE - 1));
+ /* Enable TSO */
+ if (priv->tso)
+ priv->hw->dma->enable_tso(priv->ioaddr, 1, STMMAC_CHAN0);
return 0;
}
@@ -1788,8 +1762,9 @@ static int stmmac_open(struct net_device *dev)
stmmac_check_ether_addr(priv);
- if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
- priv->pcs != STMMAC_PCS_RTBI) {
+ if (priv->hw->pcs != STMMAC_PCS_RGMII &&
+ priv->hw->pcs != STMMAC_PCS_TBI &&
+ priv->hw->pcs != STMMAC_PCS_RTBI) {
ret = stmmac_init_phy(dev);
if (ret) {
pr_err("%s: Cannot attach to PHY (error: %d)\n",
@@ -1934,6 +1909,239 @@ static int stmmac_release(struct net_device *dev)
}
/**
+ * stmmac_tso_allocator - allocate and fill descriptors for a TSO payload
+ * @priv: driver private structure
+ * @des: buffer start address
+ * @total_len: total length to fill in descriptors
+ * @last_segment: condition for the last descriptor
+ * Description:
+ * This function fills the descriptors and requests new ones according to
+ * the buffer length left to fill.
+ */
+static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
+ int total_len, bool last_segment)
+{
+ struct dma_desc *desc;
+ int tmp_len;
+ u32 buff_size;
+
+ tmp_len = total_len;
+
+ while (tmp_len > 0) {
+ priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
+ desc = priv->dma_tx + priv->cur_tx;
+
+ desc->des0 = des + (total_len - tmp_len);
+ buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
+ TSO_MAX_BUFF_SIZE : tmp_len;
+
+ priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
+ 0, 1,
+ (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
+ 0, 0);
+
+ tmp_len -= TSO_MAX_BUFF_SIZE;
+ }
+}
+
+/**
+ * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
+ * @skb : the socket buffer
+ * @dev : device pointer
+ * Description: this is the transmit function that is called on TSO frames
+ * (support available on GMAC4 and newer chips).
+ * The diagram below shows the ring programming in the case of TSO frames:
+ *
+ * First Descriptor
+ * --------
+ * | DES0 |---> buffer1 = L2/L3/L4 header
+ * | DES1 |---> TCP Payload (can continue on next descr...)
+ * | DES2 |---> buffer 1 and 2 len
+ * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
+ * --------
+ * |
+ * ...
+ * |
+ * --------
+ * | DES0 | --| Split TCP Payload on Buffers 1 and 2
+ * | DES1 | --|
+ * | DES2 | --> buffer 1 and 2 len
+ * | DES3 |
+ * --------
+ *
+ * The MSS is fixed while TSO is enabled, so there is no need to reprogram
+ * the TDES3 ctx field for each frame.
+ */
+static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ u32 pay_len, mss;
+ int tmp_pay_len = 0;
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int nfrags = skb_shinfo(skb)->nr_frags;
+ unsigned int first_entry, des;
+ struct dma_desc *desc, *first, *mss_desc = NULL;
+ u8 proto_hdr_len;
+ int i;
+
+ spin_lock(&priv->tx_lock);
+
+ /* Compute header lengths */
+ proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+ /* Desc availability based on threshold should be safe enough */
+ if (unlikely(stmmac_tx_avail(priv) <
+ (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
+ if (!netif_queue_stopped(dev)) {
+ netif_stop_queue(dev);
+ /* This is a hard error, log it. */
+ pr_err("%s: Tx Ring full when queue awake\n", __func__);
+ }
+ spin_unlock(&priv->tx_lock);
+ return NETDEV_TX_BUSY;
+ }
+
+ pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
+
+ mss = skb_shinfo(skb)->gso_size;
+
+ /* set new MSS value if needed */
+ if (mss != priv->mss) {
+ mss_desc = priv->dma_tx + priv->cur_tx;
+ priv->hw->desc->set_mss(mss_desc, mss);
+ priv->mss = mss;
+ priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
+ }
+
+ if (netif_msg_tx_queued(priv)) {
+ pr_info("%s: tcphdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
+ __func__, tcp_hdrlen(skb), proto_hdr_len, pay_len, mss);
+ pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
+ skb->data_len);
+ }
+
+ first_entry = priv->cur_tx;
+
+ desc = priv->dma_tx + first_entry;
+ first = desc;
+
+ /* first descriptor: fill Headers on Buf1 */
+ des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, des))
+ goto dma_map_err;
+
+ priv->tx_skbuff_dma[first_entry].buf = des;
+ priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
+ priv->tx_skbuff[first_entry] = skb;
+
+ first->des0 = des;
+
+ /* Fill start of payload in buff2 of first descriptor */
+ if (pay_len)
+ first->des1 = des + proto_hdr_len;
+
+ /* If needed take extra descriptors to fill the remaining payload */
+ tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
+
+ stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0));
+
+ /* Prepare fragments */
+ for (i = 0; i < nfrags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ des = skb_frag_dma_map(priv->device, frag, 0,
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+
+ stmmac_tso_allocator(priv, des, skb_frag_size(frag),
+ (i == nfrags - 1));
+
+ priv->tx_skbuff_dma[priv->cur_tx].buf = des;
+ priv->tx_skbuff_dma[priv->cur_tx].len = skb_frag_size(frag);
+ priv->tx_skbuff[priv->cur_tx] = NULL;
+ priv->tx_skbuff_dma[priv->cur_tx].map_as_page = true;
+ }
+
+ priv->tx_skbuff_dma[priv->cur_tx].last_segment = true;
+
+ priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
+
+ if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
+ if (netif_msg_hw(priv))
+ pr_debug("%s: stop transmitted packets\n", __func__);
+ netif_stop_queue(dev);
+ }
+
+ dev->stats.tx_bytes += skb->len;
+ priv->xstats.tx_tso_frames++;
+ priv->xstats.tx_tso_nfrags += nfrags;
+
+ /* Manage tx mitigation */
+ priv->tx_count_frames += nfrags + 1;
+ if (likely(priv->tx_coal_frames > priv->tx_count_frames)) {
+ mod_timer(&priv->txtimer,
+ STMMAC_COAL_TIMER(priv->tx_coal_timer));
+ } else {
+ priv->tx_count_frames = 0;
+ priv->hw->desc->set_tx_ic(desc);
+ priv->xstats.tx_set_ic_bit++;
+ }
+
+ if (!priv->hwts_tx_en)
+ skb_tx_timestamp(skb);
+
+ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->hwts_tx_en)) {
+ /* declare that device is doing timestamping */
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ priv->hw->desc->enable_tx_timestamp(first);
+ }
+
+ /* Complete the first descriptor before granting the DMA */
+ priv->hw->desc->prepare_tso_tx_desc(first, 1,
+ proto_hdr_len,
+ pay_len,
+ 1, priv->tx_skbuff_dma[first_entry].last_segment,
+ tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
+
+ /* If context desc is used to change MSS */
+ if (mss_desc)
+ priv->hw->desc->set_tx_owner(mss_desc);
+
+ /* The own bit must be the latest setting done when preparing the
+ * descriptor, and then a barrier is needed to make sure that
+ * all is coherent before granting the DMA engine.
+ */
+ smp_wmb();
+
+ if (netif_msg_pktdata(priv)) {
+ pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
+ __func__, priv->cur_tx, priv->dirty_tx, first_entry,
+ priv->cur_tx, first, nfrags);
+
+ priv->hw->desc->display_ring((void *)priv->dma_tx, DMA_TX_SIZE,
+ 0);
+
+ pr_info(">>> frame to be transmitted: ");
+ print_pkt(skb->data, skb_headlen(skb));
+ }
+
+ netdev_sent_queue(dev, skb->len);
+
+ priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
+ STMMAC_CHAN0);
+
+ spin_unlock(&priv->tx_lock);
+ return NETDEV_TX_OK;
+
+dma_map_err:
+ spin_unlock(&priv->tx_lock);
+ dev_err(priv->device, "Tx dma map failed\n");
+ dev_kfree_skb(skb);
+ priv->dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+}
+
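
stmmac_tso_allocator() above splits the residual payload into TSO_MAX_BUFF_SIZE chunks, one descriptor each. A stand-alone sketch of that chunking, not part of the patch, using an assumed 60000-byte payload:

#include <stdio.h>

#define TSO_MAX_BUFF_SIZE	(16384 - 1)	/* SZ_16K - 1, as defined above */

int main(void)
{
	int total_len = 60000;			/* hypothetical TSO payload */
	int tmp_len = total_len, descs = 0;

	/* mirrors the loop in stmmac_tso_allocator(): each descriptor
	 * carries at most TSO_MAX_BUFF_SIZE bytes */
	while (tmp_len > 0) {
		int buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
				TSO_MAX_BUFF_SIZE : tmp_len;
		printf("desc %d: %d bytes\n", descs++, buff_size);
		tmp_len -= TSO_MAX_BUFF_SIZE;
	}
	printf("%d descriptors for %d bytes\n", descs, total_len);
	return 0;
}
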
+/**
* stmmac_xmit - Tx entry point of the driver
* @skb : the socket buffer
* @dev : device pointer
@@ -1950,6 +2158,13 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int entry, first_entry;
struct dma_desc *desc, *first;
unsigned int enh_desc;
+ unsigned int des;
+
+ /* Manage oversized TCP frames for GMAC4 device */
+ if (skb_is_gso(skb) && priv->tso) {
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ return stmmac_tso_xmit(skb, dev);
+ }
spin_lock(&priv->tx_lock);
@@ -1985,7 +2200,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (enh_desc)
is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
- if (unlikely(is_jumbo)) {
+ if (unlikely(is_jumbo) && likely(priv->synopsys_id <
+ DWMAC_CORE_4_00)) {
entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
if (unlikely(entry < 0))
goto dma_map_err;
@@ -2003,13 +2219,21 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
else
desc = priv->dma_tx + entry;
- desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(priv->device, desc->des2))
+ des = skb_frag_dma_map(priv->device, frag, 0, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, des))
goto dma_map_err; /* should reuse desc w/o issues */
priv->tx_skbuff[entry] = NULL;
- priv->tx_skbuff_dma[entry].buf = desc->des2;
+
+ if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
+ desc->des0 = des;
+ priv->tx_skbuff_dma[entry].buf = desc->des0;
+ } else {
+ desc->des2 = des;
+ priv->tx_skbuff_dma[entry].buf = desc->des2;
+ }
+
priv->tx_skbuff_dma[entry].map_as_page = true;
priv->tx_skbuff_dma[entry].len = len;
priv->tx_skbuff_dma[entry].last_segment = last_segment;
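The address-placement split above (des0 on GMAC4 and newer cores, des2 on older ones) recurs for every buffer this function programs; a hypothetical helper expressing the pattern (a sketch, not part of the patch) could be:

    /* Sketch: put a DMA address in the correct descriptor word for the
     * detected core; only the word used for the buffer address differs.
     */
    static inline void stmmac_set_desc_addr(struct stmmac_priv *priv,
                                            struct dma_desc *desc,
                                            unsigned int des)
    {
            if (priv->synopsys_id >= DWMAC_CORE_4_00)
                    desc->des0 = des;
            else
                    desc->des2 = des;
    }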
@@ -2024,16 +2248,18 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
priv->cur_tx = entry;
if (netif_msg_pktdata(priv)) {
+ void *tx_head;
+
pr_debug("%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
__func__, priv->cur_tx, priv->dirty_tx, first_entry,
entry, first, nfrags);
if (priv->extend_desc)
- stmmac_display_ring((void *)priv->dma_etx,
- DMA_TX_SIZE, 1);
+ tx_head = (void *)priv->dma_etx;
else
- stmmac_display_ring((void *)priv->dma_tx,
- DMA_TX_SIZE, 0);
+ tx_head = (void *)priv->dma_tx;
+
+ priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
pr_debug(">>> frame to be transmitted: ");
print_pkt(skb->data, skb->len);
@@ -2072,12 +2298,19 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (likely(!is_jumbo)) {
bool last_segment = (nfrags == 0);
- first->des2 = dma_map_single(priv->device, skb->data,
- nopaged_len, DMA_TO_DEVICE);
- if (dma_mapping_error(priv->device, first->des2))
+ des = dma_map_single(priv->device, skb->data,
+ nopaged_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, des))
goto dma_map_err;
- priv->tx_skbuff_dma[first_entry].buf = first->des2;
+ if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
+ first->des0 = des;
+ priv->tx_skbuff_dma[first_entry].buf = first->des0;
+ } else {
+ first->des2 = des;
+ priv->tx_skbuff_dma[first_entry].buf = first->des2;
+ }
+
priv->tx_skbuff_dma[first_entry].len = nopaged_len;
priv->tx_skbuff_dma[first_entry].last_segment = last_segment;
@@ -2101,7 +2334,12 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
}
netdev_sent_queue(dev, skb->len);
- priv->hw->dma->enable_dma_transmission(priv->ioaddr);
+
+ if (priv->synopsys_id < DWMAC_CORE_4_00)
+ priv->hw->dma->enable_dma_transmission(priv->ioaddr);
+ else
+ priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
+ STMMAC_CHAN0);
spin_unlock(&priv->tx_lock);
return NETDEV_TX_OK;
@@ -2183,9 +2421,15 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
dev_kfree_skb(skb);
break;
}
- p->des2 = priv->rx_skbuff_dma[entry];
- priv->hw->mode->refill_desc3(priv, p);
+ if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
+ p->des0 = priv->rx_skbuff_dma[entry];
+ p->des1 = 0;
+ } else {
+ p->des2 = priv->rx_skbuff_dma[entry];
+ }
+ if (priv->hw->mode->refill_desc3)
+ priv->hw->mode->refill_desc3(priv, p);
if (priv->rx_zeroc_thresh > 0)
priv->rx_zeroc_thresh--;
@@ -2193,9 +2437,13 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
if (netif_msg_rx_status(priv))
pr_debug("\trefill entry #%d\n", entry);
}
-
wmb();
- priv->hw->desc->set_rx_owner(p);
+
+ if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
+ priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
+ else
+ priv->hw->desc->set_rx_owner(p);
+
wmb();
entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
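STMMAC_GET_ENTRY, used here and throughout the xmit paths, is a ring-index advance; assuming DMA_RX_SIZE and DMA_TX_SIZE are powers of two (as the driver sizes them), it reduces to a masked increment:

    /* Ring advance with wrap-around; assumes size is a power of two. */
    #define STMMAC_GET_ENTRY(x, size)	(((x) + 1) & ((size) - 1))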
@@ -2218,13 +2466,15 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
int coe = priv->hw->rx_csum;
if (netif_msg_rx_status(priv)) {
+ void *rx_head;
+
pr_debug("%s: descriptor ring:\n", __func__);
if (priv->extend_desc)
- stmmac_display_ring((void *)priv->dma_erx,
- DMA_RX_SIZE, 1);
+ rx_head = (void *)priv->dma_erx;
else
- stmmac_display_ring((void *)priv->dma_rx,
- DMA_RX_SIZE, 0);
+ rx_head = (void *)priv->dma_rx;
+
+ priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
}
while (count < limit) {
int status;
@@ -2274,11 +2524,23 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
} else {
struct sk_buff *skb;
int frame_len;
+ unsigned int des;
+
+ if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
+ des = p->des0;
+ else
+ des = p->des2;
frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
- /* check if frame_len fits the preallocated memory */
+ /* If frame length is greater than skb buffer size
+ * (preallocated during init) then the packet is
+ * ignored
+ */
if (frame_len > priv->dma_buf_sz) {
+ pr_err("%s: len %d larger than size (%d)\n",
+ priv->dev->name, frame_len,
+ priv->dma_buf_sz);
priv->dev->stats.rx_length_errors++;
break;
}
@@ -2291,14 +2553,19 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
if (netif_msg_rx_status(priv)) {
pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
- p, entry, p->des2);
+ p, entry, des);
if (frame_len > ETH_FRAME_LEN)
pr_debug("\tframe size %d, COE: %d\n",
frame_len, status);
}
- if (unlikely((frame_len < priv->rx_copybreak) ||
- stmmac_rx_threshold_count(priv))) {
+ /* Zero-copy is always used, whatever the frame size,
+ * in case of GMAC4 because the used descriptors must
+ * always be refilled.
+ */
+ if (unlikely(!priv->plat->has_gmac4 &&
+ ((frame_len < priv->rx_copybreak) ||
+ stmmac_rx_threshold_count(priv)))) {
skb = netdev_alloc_skb_ip_align(priv->dev,
frame_len);
if (unlikely(!skb)) {
@@ -2450,7 +2717,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
return -EBUSY;
}
- if (priv->plat->enh_desc)
+ if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
max_mtu = JUMBO_LEN;
else
max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
@@ -2464,6 +2731,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
}
dev->mtu = new_mtu;
+
netdev_update_features(dev);
return 0;
@@ -2488,6 +2756,14 @@ static netdev_features_t stmmac_fix_features(struct net_device *dev,
if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
features &= ~NETIF_F_CSUM_MASK;
+ /* Disable tso if asked by ethtool */
+ if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
+ if (features & NETIF_F_TSO)
+ priv->tso = true;
+ else
+ priv->tso = false;
+ }
+
return features;
}
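With this hook in place, TSO can be toggled from userspace at runtime (ethtool -K <iface> tso on|off); stmmac_fix_features() then mirrors the requested NETIF_F_TSO state into priv->tso, and the xmit entry point picks the TSO or regular path accordingly.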
@@ -2534,7 +2810,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
}
/* To handle GMAC own interrupts */
- if (priv->plat->has_gmac) {
+ if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
int status = priv->hw->mac->host_irq_status(priv->hw,
&priv->xstats);
if (unlikely(status)) {
@@ -2543,6 +2819,18 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
priv->tx_path_in_lpi_mode = true;
if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
priv->tx_path_in_lpi_mode = false;
+ if (status & CORE_IRQ_MTL_RX_OVERFLOW && priv->hw->dma->set_rx_tail_ptr)
+ priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
+ priv->rx_tail_addr,
+ STMMAC_CHAN0);
+ }
+
+ /* PCS link status */
+ if (priv->hw->pcs) {
+ if (priv->xstats.pcs_link)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
}
}
@@ -2615,15 +2903,14 @@ static void sysfs_display_ring(void *head, int size, int extend_desc,
x = *(u64 *) ep;
seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
i, (unsigned int)virt_to_phys(ep),
- (unsigned int)x, (unsigned int)(x >> 32),
+ ep->basic.des0, ep->basic.des1,
ep->basic.des2, ep->basic.des3);
ep++;
} else {
x = *(u64 *) p;
seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
i, (unsigned int)virt_to_phys(ep),
- (unsigned int)x, (unsigned int)(x >> 32),
- p->des2, p->des3);
+ p->des0, p->des1, p->des2, p->des3);
p++;
}
seq_printf(seq, "\n");
@@ -2706,10 +2993,15 @@ static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
seq_printf(seq, "\tChecksum Offload in TX: %s\n",
(priv->dma_cap.tx_coe) ? "Y" : "N");
- seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
- (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
- seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
- (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
+ if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+ seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
+ (priv->dma_cap.rx_coe) ? "Y" : "N");
+ } else {
+ seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
+ (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
+ seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
+ (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
+ }
seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
(priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
@@ -2818,27 +3110,35 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
priv->dev->priv_flags |= IFF_UNICAST_FLT;
mac = dwmac1000_setup(priv->ioaddr,
priv->plat->multicast_filter_bins,
- priv->plat->unicast_filter_entries);
+ priv->plat->unicast_filter_entries,
+ &priv->synopsys_id);
+ } else if (priv->plat->has_gmac4) {
+ priv->dev->priv_flags |= IFF_UNICAST_FLT;
+ mac = dwmac4_setup(priv->ioaddr,
+ priv->plat->multicast_filter_bins,
+ priv->plat->unicast_filter_entries,
+ &priv->synopsys_id);
} else {
- mac = dwmac100_setup(priv->ioaddr);
+ mac = dwmac100_setup(priv->ioaddr, &priv->synopsys_id);
}
if (!mac)
return -ENOMEM;
priv->hw = mac;
- /* Get and dump the chip ID */
- priv->synopsys_id = stmmac_get_synopsys_id(priv);
-
/* To use the chained or ring mode */
- if (chain_mode) {
- priv->hw->mode = &chain_mode_ops;
- pr_info(" Chain mode enabled\n");
- priv->mode = STMMAC_CHAIN_MODE;
+ if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+ priv->hw->mode = &dwmac4_ring_mode_ops;
} else {
- priv->hw->mode = &ring_mode_ops;
- pr_info(" Ring mode enabled\n");
- priv->mode = STMMAC_RING_MODE;
+ if (chain_mode) {
+ priv->hw->mode = &chain_mode_ops;
+ pr_info(" Chain mode enabled\n");
+ priv->mode = STMMAC_CHAIN_MODE;
+ } else {
+ priv->hw->mode = &ring_mode_ops;
+ pr_info(" Ring mode enabled\n");
+ priv->mode = STMMAC_RING_MODE;
+ }
}
/* Get the HW capability (new GMAC newer than 3.50a) */
@@ -2853,6 +3153,7 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
*/
priv->plat->enh_desc = priv->dma_cap.enh_desc;
priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
+ priv->hw->pmt = priv->plat->pmt;
/* TXCOE doesn't work in thresh DMA mode */
if (priv->plat->force_thresh_dma_mode)
@@ -2860,6 +3161,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
else
priv->plat->tx_coe = priv->dma_cap.tx_coe;
+ /* In case of GMAC4 rx_coe is from HW cap register. */
+ priv->plat->rx_coe = priv->dma_cap.rx_coe;
+
if (priv->dma_cap.rx_coe_type2)
priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
else if (priv->dma_cap.rx_coe_type1)
@@ -2868,13 +3172,17 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
} else
pr_info(" No HW DMA feature register supported");
- /* To use alternate (extended) or normal descriptor structures */
- stmmac_selec_desc_mode(priv);
+ /* To use alternate (extended), normal or GMAC4 descriptor structures */
+ if (priv->synopsys_id >= DWMAC_CORE_4_00)
+ priv->hw->desc = &dwmac4_desc_ops;
+ else
+ stmmac_selec_desc_mode(priv);
if (priv->plat->rx_coe) {
priv->hw->rx_csum = priv->plat->rx_coe;
- pr_info(" RX Checksum Offload Engine supported (type %d)\n",
- priv->plat->rx_coe);
+ pr_info(" RX Checksum Offload Engine supported\n");
+ if (priv->synopsys_id < DWMAC_CORE_4_00)
+ pr_info("\tCOE Type %d\n", priv->hw->rx_csum);
}
if (priv->plat->tx_coe)
pr_info(" TX Checksum insertion supported\n");
@@ -2884,6 +3192,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
device_set_wakeup_capable(priv->device, 1);
}
+ if (priv->dma_cap.tsoen)
+ pr_info(" TSO supported\n");
+
return 0;
}
@@ -2987,6 +3298,12 @@ int stmmac_dvr_probe(struct device *device,
ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM;
+
+ if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
+ ndev->hw_features |= NETIF_F_TSO;
+ priv->tso = true;
+ pr_info(" TSO feature enabled\n");
+ }
ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
@@ -3032,8 +3349,9 @@ int stmmac_dvr_probe(struct device *device,
stmmac_check_pcs_mode(priv);
- if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
- priv->pcs != STMMAC_PCS_RTBI) {
+ if (priv->hw->pcs != STMMAC_PCS_RGMII &&
+ priv->hw->pcs != STMMAC_PCS_TBI &&
+ priv->hw->pcs != STMMAC_PCS_RTBI) {
/* MDIO bus Registration */
ret = stmmac_mdio_register(ndev);
if (ret < 0) {
@@ -3062,12 +3380,13 @@ EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
/**
* stmmac_dvr_remove
- * @ndev: net device pointer
+ * @dev: device pointer
* Description: this function resets the TX/RX processes, disables the MAC RX/TX
* changes the link status, releases the DMA descriptor rings.
*/
-int stmmac_dvr_remove(struct net_device *ndev)
+int stmmac_dvr_remove(struct device *dev)
{
+ struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
pr_info("%s:\n\tremoving driver", __func__);
@@ -3078,12 +3397,14 @@ int stmmac_dvr_remove(struct net_device *ndev)
stmmac_set_mac(priv->ioaddr, false);
netif_carrier_off(ndev);
unregister_netdev(ndev);
+ of_node_put(priv->plat->phy_node);
if (priv->stmmac_rst)
reset_control_assert(priv->stmmac_rst);
clk_disable_unprepare(priv->pclk);
clk_disable_unprepare(priv->stmmac_clk);
- if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
- priv->pcs != STMMAC_PCS_RTBI)
+ if (priv->hw->pcs != STMMAC_PCS_RGMII &&
+ priv->hw->pcs != STMMAC_PCS_TBI &&
+ priv->hw->pcs != STMMAC_PCS_RTBI)
stmmac_mdio_unregister(ndev);
free_netdev(ndev);
@@ -3093,13 +3414,14 @@ EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
/**
* stmmac_suspend - suspend callback
- * @ndev: net device pointer
+ * @dev: device pointer
* Description: this is the function to suspend the device and it is called
* by the platform driver to stop the network queue, release the resources,
* program the PMT register (for WoL), clean and release driver resources.
*/
-int stmmac_suspend(struct net_device *ndev)
+int stmmac_suspend(struct device *dev)
{
+ struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned long flags;
@@ -3142,20 +3464,19 @@ EXPORT_SYMBOL_GPL(stmmac_suspend);
/**
* stmmac_resume - resume callback
- * @ndev: net device pointer
+ * @dev: device pointer
* Description: when resume this function is invoked to setup the DMA and CORE
* in a usable state.
*/
-int stmmac_resume(struct net_device *ndev)
+int stmmac_resume(struct device *dev)
{
+ struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned long flags;
if (!netif_running(ndev))
return 0;
- spin_lock_irqsave(&priv->lock, flags);
-
/* Power Down bit, into the PM register, is cleared
* automatically as soon as a magic packet or a Wake-up frame
* is received. Anyway, it's better to manually clear
@@ -3163,7 +3484,9 @@ int stmmac_resume(struct net_device *ndev)
* from another devices (e.g. serial console).
*/
if (device_may_wakeup(priv->device)) {
+ spin_lock_irqsave(&priv->lock, flags);
priv->hw->mac->pmt(priv->hw, 0);
+ spin_unlock_irqrestore(&priv->lock, flags);
priv->irq_wake = 0;
} else {
pinctrl_pm_select_default_state(priv->device);
@@ -3177,10 +3500,17 @@ int stmmac_resume(struct net_device *ndev)
netif_device_attach(ndev);
+ spin_lock_irqsave(&priv->lock, flags);
+
priv->cur_rx = 0;
priv->dirty_rx = 0;
priv->dirty_tx = 0;
priv->cur_tx = 0;
+ /* reset private mss value to force mss context settings at
+ * next tso xmit (only used for gmac4).
+ */
+ priv->mss = 0;
+
stmmac_clear_descriptors(priv);
stmmac_hw_setup(ndev, false);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 06704ca6f9ca..ec295851812b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -37,6 +37,18 @@
#define MII_BUSY 0x00000001
#define MII_WRITE 0x00000002
+/* GMAC4 defines */
+#define MII_GMAC4_GOC_SHIFT 2
+#define MII_GMAC4_WRITE (1 << MII_GMAC4_GOC_SHIFT)
+#define MII_GMAC4_READ (3 << MII_GMAC4_GOC_SHIFT)
+
+#define MII_PHY_ADDR_GMAC4_SHIFT 21
+#define MII_PHY_ADDR_GMAC4_MASK GENMASK(25, 21)
+#define MII_PHY_REG_GMAC4_SHIFT 16
+#define MII_PHY_REG_GMAC4_MASK GENMASK(20, 16)
+#define MII_CSR_CLK_GMAC4_SHIFT 8
+#define MII_CSR_CLK_GMAC4_MASK GENMASK(11, 8)
+
static int stmmac_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_addr)
{
unsigned long curr;
@@ -124,6 +136,80 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
}
/**
+ * stmmac_mdio_read_gmac4
+ * @bus: points to the mii_bus structure
+ * @phyaddr: MII addr reg bits 25-21
+ * @phyreg: MII addr reg bits 20-16
+ * Description: it reads data from the GMAC4 MII register of the
+ * given phy device.
+ */
+static int stmmac_mdio_read_gmac4(struct mii_bus *bus, int phyaddr, int phyreg)
+{
+ struct net_device *ndev = bus->priv;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ unsigned int mii_address = priv->hw->mii.addr;
+ unsigned int mii_data = priv->hw->mii.data;
+ int data;
+ u32 value = (((phyaddr << MII_PHY_ADDR_GMAC4_SHIFT) &
+ (MII_PHY_ADDR_GMAC4_MASK)) |
+ ((phyreg << MII_PHY_REG_GMAC4_SHIFT) &
+ (MII_PHY_REG_GMAC4_MASK))) | MII_GMAC4_READ;
+
+ value |= MII_BUSY | ((priv->clk_csr << MII_CSR_CLK_GMAC4_SHIFT)
+ & MII_CSR_CLK_GMAC4_MASK);
+
+ if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
+ return -EBUSY;
+
+ writel(value, priv->ioaddr + mii_address);
+
+ if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
+ return -EBUSY;
+
+ /* Read the data from the MII data register */
+ data = (int)readl(priv->ioaddr + mii_data);
+
+ return data;
+}
+
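Putting the defines above together: the MII address word combines the PHY address, register number, CSR clock range and the GMAC4 read/write command. Roughly, under the masks defined at the top of this file (a sketch, not the function's verbatim body):

    /* Sketch: compose the GMAC4 MII address word for a read. */
    static u32 gmac4_mii_addr_word(int phyaddr, int phyreg, u32 clk_csr)
    {
            u32 value;

            value  = (phyaddr << MII_PHY_ADDR_GMAC4_SHIFT) & MII_PHY_ADDR_GMAC4_MASK;
            value |= (phyreg << MII_PHY_REG_GMAC4_SHIFT) & MII_PHY_REG_GMAC4_MASK;
            value |= (clk_csr << MII_CSR_CLK_GMAC4_SHIFT) & MII_CSR_CLK_GMAC4_MASK;
            value |= MII_GMAC4_READ | MII_BUSY;

            return value;
    }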
+/**
+ * stmmac_mdio_write_gmac4
+ * @bus: points to the mii_bus structure
+ * @phyaddr: MII addr reg bits 25-21
+ * @phyreg: MII addr reg bits 20-16
+ * @phydata: phy data
+ * Description: it writes data into the GMAC4 MII register of the
+ * given phy device.
+ */
+static int stmmac_mdio_write_gmac4(struct mii_bus *bus, int phyaddr, int phyreg,
+ u16 phydata)
+{
+ struct net_device *ndev = bus->priv;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ unsigned int mii_address = priv->hw->mii.addr;
+ unsigned int mii_data = priv->hw->mii.data;
+
+ u32 value = (((phyaddr << MII_PHY_ADDR_GMAC4_SHIFT) &
+ (MII_PHY_ADDR_GMAC4_MASK)) |
+ ((phyreg << MII_PHY_REG_GMAC4_SHIFT) &
+ (MII_PHY_REG_GMAC4_MASK))) | MII_GMAC4_WRITE;
+
+ value |= MII_BUSY | ((priv->clk_csr << MII_CSR_CLK_GMAC4_SHIFT)
+ & MII_CSR_CLK_GMAC4_MASK);
+
+ /* Wait until any existing MII operation is complete */
+ if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
+ return -EBUSY;
+
+ /* Set the MII address register to write */
+ writel(phydata, priv->ioaddr + mii_data);
+ writel(value, priv->ioaddr + mii_address);
+
+ /* Wait until any existing MII operation is complete */
+ return stmmac_mdio_busy_wait(priv->ioaddr, mii_address);
+}
+
+/**
* stmmac_mdio_reset
* @bus: points to the mii_bus structure
* Description: reset the MII bus
@@ -180,9 +266,11 @@ int stmmac_mdio_reset(struct mii_bus *bus)
/* This is a workaround for problems with the STE101P PHY.
* It doesn't complete its reset until at least one clock cycle
- * on MDC, so perform a dummy mdio read.
+ * on MDC, so perform a dummy mdio read. To be updated for GMAC4
+ * if needed.
*/
- writel(0, priv->ioaddr + mii_address);
+ if (!priv->plat->has_gmac4)
+ writel(0, priv->ioaddr + mii_address);
#endif
return 0;
}
@@ -209,7 +297,7 @@ int stmmac_mdio_register(struct net_device *ndev)
return -ENOMEM;
if (mdio_bus_data->irqs)
- memcpy(new_bus->irq, mdio_bus_data, sizeof(new_bus->irq));
+ memcpy(new_bus->irq, mdio_bus_data->irqs, sizeof(new_bus->irq));
#ifdef CONFIG_OF
if (priv->device->of_node)
@@ -217,8 +305,14 @@ int stmmac_mdio_register(struct net_device *ndev)
#endif
new_bus->name = "stmmac";
- new_bus->read = &stmmac_mdio_read;
- new_bus->write = &stmmac_mdio_write;
+ if (priv->plat->has_gmac4) {
+ new_bus->read = &stmmac_mdio_read_gmac4;
+ new_bus->write = &stmmac_mdio_write_gmac4;
+ } else {
+ new_bus->read = &stmmac_mdio_read;
+ new_bus->write = &stmmac_mdio_write;
+ }
+
new_bus->reset = &stmmac_mdio_reset;
snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x",
new_bus->name, priv->plat->bus_id);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index ae4388735b7f..56c8a2342c14 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -231,30 +231,10 @@ static int stmmac_pci_probe(struct pci_dev *pdev,
*/
static void stmmac_pci_remove(struct pci_dev *pdev)
{
- struct net_device *ndev = pci_get_drvdata(pdev);
-
- stmmac_dvr_remove(ndev);
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int stmmac_pci_suspend(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *ndev = pci_get_drvdata(pdev);
-
- return stmmac_suspend(ndev);
-}
-
-static int stmmac_pci_resume(struct device *dev)
-{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct net_device *ndev = pci_get_drvdata(pdev);
-
- return stmmac_resume(ndev);
+ stmmac_dvr_remove(&pdev->dev);
}
-#endif
-static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume);
+static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_suspend, stmmac_resume);
#define STMMAC_VENDOR_ID 0x700
#define STMMAC_QUARK_ID 0x0937
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
new file mode 100644
index 000000000000..eba41c24b7a7
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
@@ -0,0 +1,159 @@
+/*
+ * stmmac_pcs.h: Physical Coding Sublayer Header File
+ *
+ * Copyright (C) 2016 STMicroelectronics (R&D) Limited
+ * Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __STMMAC_PCS_H__
+#define __STMMAC_PCS_H__
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include "common.h"
+
+/* PCS registers (AN/TBI/SGMII/RGMII) offsets */
+#define GMAC_AN_CTRL(x) (x) /* AN control */
+#define GMAC_AN_STATUS(x) (x + 0x4) /* AN status */
+#define GMAC_ANE_ADV(x) (x + 0x8) /* ANE Advertisement */
+#define GMAC_ANE_LPA(x) (x + 0xc) /* ANE link partner ability */
+#define GMAC_ANE_EXP(x) (x + 0x10) /* ANE expansion */
+#define GMAC_TBI(x) (x + 0x14) /* TBI extend status */
+
+/* AN Configuration defines */
+#define GMAC_AN_CTRL_RAN BIT(9) /* Restart Auto-Negotiation */
+#define GMAC_AN_CTRL_ANE BIT(12) /* Auto-Negotiation Enable */
+#define GMAC_AN_CTRL_ELE BIT(14) /* External Loopback Enable */
+#define GMAC_AN_CTRL_ECD BIT(16) /* Enable Comma Detect */
+#define GMAC_AN_CTRL_LR BIT(17) /* Lock to Reference */
+#define GMAC_AN_CTRL_SGMRAL BIT(18) /* SGMII RAL Control */
+
+/* AN Status defines */
+#define GMAC_AN_STATUS_LS BIT(2) /* Link Status 0:down 1:up */
+#define GMAC_AN_STATUS_ANA BIT(3) /* Auto-Negotiation Ability */
+#define GMAC_AN_STATUS_ANC BIT(5) /* Auto-Negotiation Complete */
+#define GMAC_AN_STATUS_ES BIT(8) /* Extended Status */
+
+/* ADV and LPA defines */
+#define GMAC_ANE_FD BIT(5)
+#define GMAC_ANE_HD BIT(6)
+#define GMAC_ANE_PSE GENMASK(8, 7)
+#define GMAC_ANE_PSE_SHIFT 7
+#define GMAC_ANE_RFE GENMASK(13, 12)
+#define GMAC_ANE_RFE_SHIFT 12
+#define GMAC_ANE_ACK BIT(14)
+
+/**
+ * dwmac_pcs_isr - TBI, RTBI, or SGMII PHY ISR
+ * @ioaddr: IO registers pointer
+ * @reg: Base address of the AN Control Register.
+ * @intr_status: GMAC core interrupt status
+ * @x: pointer to log these events as stats
+ * Description: it is the ISR for PCS events: Auto-Negotiation Completed and
+ * Link status.
+ */
+static inline void dwmac_pcs_isr(void __iomem *ioaddr, u32 reg,
+ unsigned int intr_status,
+ struct stmmac_extra_stats *x)
+{
+ u32 val = readl(ioaddr + GMAC_AN_STATUS(reg));
+
+ if (intr_status & PCS_ANE_IRQ) {
+ x->irq_pcs_ane_n++;
+ if (val & GMAC_AN_STATUS_ANC)
+ pr_info("stmmac_pcs: ANE process completed\n");
+ }
+
+ if (intr_status & PCS_LINK_IRQ) {
+ x->irq_pcs_link_n++;
+ if (val & GMAC_AN_STATUS_LS)
+ pr_info("stmmac_pcs: Link Up\n");
+ else
+ pr_info("stmmac_pcs: Link Down\n");
+ }
+}
+
+/**
+ * dwmac_rane - To restart ANE
+ * @ioaddr: IO registers pointer
+ * @reg: Base address of the AN Control Register.
+ * @restart: to restart ANE
+ * Description: this just restarts the Auto-Negotiation.
+ */
+static inline void dwmac_rane(void __iomem *ioaddr, u32 reg, bool restart)
+{
+ u32 value = readl(ioaddr + GMAC_AN_CTRL(reg));
+
+ if (restart)
+ value |= GMAC_AN_CTRL_RAN;
+
+ writel(value, ioaddr + GMAC_AN_CTRL(reg));
+}
+
+/**
+ * dwmac_ctrl_ane - To program the AN Control Register.
+ * @ioaddr: IO registers pointer
+ * @reg: Base address of the AN Control Register.
+ * @ane: to enable the auto-negotiation
+ * @srgmi_ral: to manage MAC-2-MAC SGMII connections.
+ * @loopback: to cause the PHY to loopback tx data into rx path.
+ * Description: this is the main function to configure the AN control register
+ * and init the ANE, select loopback (usually for debugging purpose) and
+ * configure SGMII RAL.
+ */
+static inline void dwmac_ctrl_ane(void __iomem *ioaddr, u32 reg, bool ane,
+ bool srgmi_ral, bool loopback)
+{
+ u32 value = readl(ioaddr + GMAC_AN_CTRL(reg));
+
+ /* Enable and restart the Auto-Negotiation */
+ if (ane)
+ value |= GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_RAN;
+
+ /* In case of MAC-2-MAC connection, block is configured to operate
+ * according to MAC conf register.
+ */
+ if (srgmi_ral)
+ value |= GMAC_AN_CTRL_SGMRAL;
+
+ if (loopback)
+ value |= GMAC_AN_CTRL_ELE;
+
+ writel(value, ioaddr + GMAC_AN_CTRL(reg));
+}
+
+/**
+ * dwmac_get_adv_lp - Get ADV and LP cap
+ * @ioaddr: IO registers pointer
+ * @reg: Base address of the AN Control Register.
+ * @adv_lp: structure to store the adv,lp status
+ * Description: this is to expose the ANE advertisement and Link partner ability
+ * status to ethtool support.
+ */
+static inline void dwmac_get_adv_lp(void __iomem *ioaddr, u32 reg,
+ struct rgmii_adv *adv_lp)
+{
+ u32 value = readl(ioaddr + GMAC_ANE_ADV(reg));
+
+ if (value & GMAC_ANE_FD)
+ adv_lp->duplex = DUPLEX_FULL;
+ if (value & GMAC_ANE_HD)
+ adv_lp->duplex |= DUPLEX_HALF;
+
+ adv_lp->pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT;
+
+ value = readl(ioaddr + GMAC_ANE_LPA(reg));
+
+ if (value & GMAC_ANE_FD)
+ adv_lp->lp_duplex = DUPLEX_FULL;
+ if (value & GMAC_ANE_HD)
+ adv_lp->lp_duplex = DUPLEX_HALF;
+
+ adv_lp->lp_pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT;
+}
+#endif /* __STMMAC_PCS_H__ */
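These inlines are intended to be called from the per-core MAC code with the base offset of that core's AN register block, e.g. (GMAC_PCS_BASE is a hypothetical offset, shown only to illustrate the calling convention):

    /* Sketch: enable + restart ANE, then service PCS events from the ISR. */
    dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, true, false, false);
    ...
    dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, &priv->xstats);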
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index cf37ea558ecc..756bb548e81a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -113,8 +113,10 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
return NULL;
axi = kzalloc(sizeof(*axi), GFP_KERNEL);
- if (!axi)
+ if (!axi) {
+ of_node_put(np);
return ERR_PTR(-ENOMEM);
+ }
axi->axi_lpi_en = of_property_read_bool(np, "snps,lpi_en");
axi->axi_xit_frm = of_property_read_bool(np, "snps,xit_frm");
@@ -127,6 +129,7 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
of_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt);
of_property_read_u32(np, "snps,rd_osr_lmt", &axi->axi_rd_osr_lmt);
of_property_read_u32_array(np, "snps,blen", axi->axi_blen, AXI_BLEN);
+ of_node_put(np);
return axi;
}
@@ -284,6 +287,13 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
plat->pmt = 1;
}
+ if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
+ of_device_is_compatible(np, "snps,dwmac-4.10a")) {
+ plat->has_gmac4 = 1;
+ plat->pmt = 1;
+ plat->tso_en = of_property_read_bool(np, "snps,tso");
+ }
+
if (of_device_is_compatible(np, "snps,dwmac-3.610") ||
of_device_is_compatible(np, "snps,dwmac-3.710")) {
plat->enh_desc = 1;
@@ -295,7 +305,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
GFP_KERNEL);
if (!dma_cfg) {
- of_node_put(np);
+ of_node_put(plat->phy_node);
return ERR_PTR(-ENOMEM);
}
plat->dma_cfg = dma_cfg;
@@ -312,6 +322,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
pr_warn("force_sf_dma_mode is ignored if force_thresh_dma_mode is set.");
}
+ of_property_read_u32(np, "snps,ps-speed", &plat->mac_port_sel_speed);
+
plat->axi = stmmac_axi_setup(pdev);
return plat;
@@ -379,7 +391,7 @@ int stmmac_pltfr_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct stmmac_priv *priv = netdev_priv(ndev);
- int ret = stmmac_dvr_remove(ndev);
+ int ret = stmmac_dvr_remove(&pdev->dev);
if (priv->plat->exit)
priv->plat->exit(pdev, priv->plat->bsp_priv);
@@ -403,8 +415,10 @@ static int stmmac_pltfr_suspend(struct device *dev)
struct stmmac_priv *priv = netdev_priv(ndev);
struct platform_device *pdev = to_platform_device(dev);
- ret = stmmac_suspend(ndev);
- if (priv->plat->exit)
+ ret = stmmac_suspend(dev);
+ if (priv->plat->suspend)
+ priv->plat->suspend(pdev, priv->plat->bsp_priv);
+ else if (priv->plat->exit)
priv->plat->exit(pdev, priv->plat->bsp_priv);
return ret;
@@ -423,10 +437,12 @@ static int stmmac_pltfr_resume(struct device *dev)
struct stmmac_priv *priv = netdev_priv(ndev);
struct platform_device *pdev = to_platform_device(dev);
- if (priv->plat->init)
+ if (priv->plat->resume)
+ priv->plat->resume(pdev, priv->plat->bsp_priv);
+ else if (priv->plat->init)
priv->plat->init(pdev, priv->plat->bsp_priv);
- return stmmac_resume(ndev);
+ return stmmac_resume(dev);
}
#endif /* CONFIG_PM_SLEEP */
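The new plat->suspend/resume callbacks let board glue do its own power handling instead of the generic exit/init pair. A platform layer could wire them up along these lines (names are illustrative, not from this patch):

    /* Sketch: hypothetical BSP hooks preferred over exit/init on PM. */
    static void my_glue_suspend(struct platform_device *pdev, void *priv)
    {
            /* e.g. gate board-level clocks, assert PHY reset */
    }

    static void my_glue_resume(struct platform_device *pdev, void *priv)
    {
            /* undo whatever my_glue_suspend did */
    }

    plat_dat->suspend = my_glue_suspend;
    plat_dat->resume  = my_glue_resume;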
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 9cc45649f477..a2371aa14a49 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6431,7 +6431,7 @@ static int niu_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
static void niu_netif_stop(struct niu *np)
{
- np->dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(np->dev); /* prevent tx timeout */
niu_disable_napi(np);
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 2437227712dc..d6ad0fbd054e 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -226,7 +226,7 @@ static void gem_put_cell(struct gem *gp)
static inline void gem_netif_stop(struct gem *gp)
{
- gp->dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(gp->dev); /* prevent tx timeout */
napi_disable(&gp->napi);
netif_tx_disable(gp->dev);
}
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
index af11ed1e0bcc..4490ebaed127 100644
--- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c
+++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
@@ -46,7 +46,6 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
-#include <linux/version.h>
#include <linux/device.h>
#include <linux/bitrev.h>
@@ -598,7 +597,6 @@ struct net_local {
struct work_struct txtimeout_reinit;
phy_interface_t phy_interface;
- struct phy_device *phy_dev;
struct mii_bus *mii_bus;
unsigned int link;
@@ -816,7 +814,7 @@ static int dwceqos_mdio_write(struct mii_bus *bus, int mii_id, int phyreg,
static int dwceqos_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
struct net_local *lp = netdev_priv(ndev);
- struct phy_device *phydev = lp->phy_dev;
+ struct phy_device *phydev = ndev->phydev;
if (!netif_running(ndev))
return -EINVAL;
@@ -850,6 +848,7 @@ static void dwceqos_link_down(struct net_local *lp)
static void dwceqos_link_up(struct net_local *lp)
{
+ struct net_device *ndev = lp->ndev;
u32 regval;
unsigned long flags;
@@ -860,7 +859,7 @@ static void dwceqos_link_up(struct net_local *lp)
dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
spin_unlock_irqrestore(&lp->hw_lock, flags);
- lp->eee_active = !phy_init_eee(lp->phy_dev, 0);
+ lp->eee_active = !phy_init_eee(ndev->phydev, 0);
/* Check for changed EEE capability */
if (!lp->eee_active && lp->eee_enabled) {
@@ -876,7 +875,8 @@ static void dwceqos_link_up(struct net_local *lp)
static void dwceqos_set_speed(struct net_local *lp)
{
- struct phy_device *phydev = lp->phy_dev;
+ struct net_device *ndev = lp->ndev;
+ struct phy_device *phydev = ndev->phydev;
u32 regval;
regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
@@ -903,7 +903,7 @@ static void dwceqos_set_speed(struct net_local *lp)
static void dwceqos_adjust_link(struct net_device *ndev)
{
struct net_local *lp = netdev_priv(ndev);
- struct phy_device *phydev = lp->phy_dev;
+ struct phy_device *phydev = ndev->phydev;
int status_change = 0;
if (lp->phy_defer)
@@ -949,7 +949,7 @@ static void dwceqos_adjust_link(struct net_device *ndev)
if (status_change) {
if (phydev->link) {
- lp->ndev->trans_start = jiffies;
+ netif_trans_update(lp->ndev);
dwceqos_link_up(lp);
} else {
dwceqos_link_down(lp);
@@ -987,7 +987,6 @@ static int dwceqos_mii_probe(struct net_device *ndev)
lp->link = 0;
lp->speed = 0;
lp->duplex = DUPLEX_UNKNOWN;
- lp->phy_dev = phydev;
return 0;
}
@@ -1247,7 +1246,7 @@ static int dwceqos_mii_init(struct net_local *lp)
lp->mii_bus->read = &dwceqos_mdio_read;
lp->mii_bus->write = &dwceqos_mdio_write;
lp->mii_bus->priv = lp;
- lp->mii_bus->parent = &lp->ndev->dev;
+ lp->mii_bus->parent = &lp->pdev->dev;
of_address_to_resource(lp->pdev->dev.of_node, 0, &res);
snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%.8llx",
@@ -1531,6 +1530,7 @@ static void dwceqos_configure_bus(struct net_local *lp)
static void dwceqos_init_hw(struct net_local *lp)
{
+ struct net_device *ndev = lp->ndev;
u32 regval;
u32 buswidth;
u32 dma_skip;
@@ -1622,13 +1622,7 @@ static void dwceqos_init_hw(struct net_local *lp)
DWCEQOS_MMC_CTRL_RSTONRD);
dwceqos_enable_mmc_interrupt(lp);
- /* Enable Interrupts */
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE,
- DWCEQOS_DMA_CH0_IE_NIE |
- DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE |
- DWCEQOS_DMA_CH0_IE_AIE |
- DWCEQOS_DMA_CH0_IE_FBEE);
-
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, 0);
dwceqos_write(lp, REG_DWCEQOS_MAC_IE, 0);
dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, DWCEQOS_MAC_CFG_IPC |
@@ -1645,10 +1639,10 @@ static void dwceqos_init_hw(struct net_local *lp)
regval | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE);
lp->phy_defer = false;
- mutex_lock(&lp->phy_dev->lock);
- phy_read_status(lp->phy_dev);
+ mutex_lock(&ndev->phydev->lock);
+ phy_read_status(ndev->phydev);
dwceqos_adjust_link(lp->ndev);
- mutex_unlock(&lp->phy_dev->lock);
+ mutex_unlock(&ndev->phydev->lock);
}
static void dwceqos_tx_reclaim(unsigned long data)
@@ -1898,13 +1892,22 @@ static int dwceqos_open(struct net_device *ndev)
* hence the unusual init order with phy_start first.
*/
lp->phy_defer = true;
- phy_start(lp->phy_dev);
+ phy_start(ndev->phydev);
dwceqos_init_hw(lp);
napi_enable(&lp->napi);
netif_start_queue(ndev);
tasklet_enable(&lp->tx_bdreclaim_tasklet);
+ /* Enable Interrupts -- do this only after we enable NAPI and the
+ * tasklet.
+ */
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE,
+ DWCEQOS_DMA_CH0_IE_NIE |
+ DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE |
+ DWCEQOS_DMA_CH0_IE_AIE |
+ DWCEQOS_DMA_CH0_IE_FBEE);
+
return 0;
}
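Moving the interrupt-enable write after napi_enable() and tasklet_enable() closes a startup race: with the old ordering an early interrupt could try to schedule NAPI or the Tx reclaim tasklet while they were still disabled, leaving completion work stranded until the next interrupt.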
@@ -1943,7 +1946,7 @@ static int dwceqos_stop(struct net_device *ndev)
dwceqos_drain_dma(lp);
dwceqos_reset_hw(lp);
- phy_stop(lp->phy_dev);
+ phy_stop(ndev->phydev);
dwceqos_descriptor_free(lp);
@@ -2203,7 +2206,7 @@ static int dwceqos_start_xmit(struct sk_buff *skb, struct net_device *ndev)
netdev_sent_queue(ndev, skb->len);
spin_unlock_bh(&lp->tx_lock);
- ndev->trans_start = jiffies;
+ netif_trans_update(ndev);
return 0;
tx_error:
@@ -2523,30 +2526,6 @@ dwceqos_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *s)
return s;
}
-static int
-dwceqos_get_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
-{
- struct net_local *lp = netdev_priv(ndev);
- struct phy_device *phydev = lp->phy_dev;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_ethtool_gset(phydev, ecmd);
-}
-
-static int
-dwceqos_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
-{
- struct net_local *lp = netdev_priv(ndev);
- struct phy_device *phydev = lp->phy_dev;
-
- if (!phydev)
- return -ENODEV;
-
- return phy_ethtool_sset(phydev, ecmd);
-}
-
static void
dwceqos_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *ed)
{
@@ -2574,17 +2553,17 @@ static int dwceqos_set_pauseparam(struct net_device *ndev,
lp->flowcontrol.autoneg = pp->autoneg;
if (pp->autoneg) {
- lp->phy_dev->advertising |= ADVERTISED_Pause;
- lp->phy_dev->advertising |= ADVERTISED_Asym_Pause;
+ ndev->phydev->advertising |= ADVERTISED_Pause;
+ ndev->phydev->advertising |= ADVERTISED_Asym_Pause;
} else {
- lp->phy_dev->advertising &= ~ADVERTISED_Pause;
- lp->phy_dev->advertising &= ~ADVERTISED_Asym_Pause;
+ ndev->phydev->advertising &= ~ADVERTISED_Pause;
+ ndev->phydev->advertising &= ~ADVERTISED_Asym_Pause;
lp->flowcontrol.rx = pp->rx_pause;
lp->flowcontrol.tx = pp->tx_pause;
}
if (netif_running(ndev))
- ret = phy_start_aneg(lp->phy_dev);
+ ret = phy_start_aneg(ndev->phydev);
return ret;
}
@@ -2705,7 +2684,7 @@ static int dwceqos_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
dwceqos_get_tx_lpi_state(regval));
}
- return phy_ethtool_get_eee(lp->phy_dev, edata);
+ return phy_ethtool_get_eee(ndev->phydev, edata);
}
static int dwceqos_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
@@ -2747,7 +2726,7 @@ static int dwceqos_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
spin_unlock_irqrestore(&lp->hw_lock, flags);
}
- return phy_ethtool_set_eee(lp->phy_dev, edata);
+ return phy_ethtool_set_eee(ndev->phydev, edata);
}
static u32 dwceqos_get_msglevel(struct net_device *ndev)
@@ -2765,8 +2744,6 @@ static void dwceqos_set_msglevel(struct net_device *ndev, u32 msglevel)
}
static struct ethtool_ops dwceqos_ethtool_ops = {
- .get_settings = dwceqos_get_settings,
- .set_settings = dwceqos_set_settings,
.get_drvinfo = dwceqos_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_pauseparam = dwceqos_get_pauseparam,
@@ -2780,6 +2757,8 @@ static struct ethtool_ops dwceqos_ethtool_ops = {
.set_eee = dwceqos_set_eee,
.get_msglevel = dwceqos_get_msglevel,
.set_msglevel = dwceqos_set_msglevel,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static struct net_device_ops netdev_ops = {
@@ -2874,25 +2853,17 @@ static int dwceqos_probe(struct platform_device *pdev)
ndev->features = ndev->hw_features;
- netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT);
-
- ret = register_netdev(ndev);
- if (ret) {
- dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
- goto err_out_clk_dis_aper;
- }
-
lp->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk");
if (IS_ERR(lp->phy_ref_clk)) {
dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
ret = PTR_ERR(lp->phy_ref_clk);
- goto err_out_unregister_netdev;
+ goto err_out_clk_dis_aper;
}
ret = clk_prepare_enable(lp->phy_ref_clk);
if (ret) {
dev_err(&pdev->dev, "Unable to enable device clock.\n");
- goto err_out_unregister_netdev;
+ goto err_out_clk_dis_aper;
}
lp->phy_node = of_parse_phandle(lp->pdev->dev.of_node,
@@ -2901,7 +2872,7 @@ static int dwceqos_probe(struct platform_device *pdev)
ret = of_phy_register_fixed_link(lp->pdev->dev.of_node);
if (ret < 0) {
dev_err(&pdev->dev, "invalid fixed-link");
- goto err_out_unregister_netdev;
+ goto err_out_clk_dis_phy;
}
lp->phy_node = of_node_get(lp->pdev->dev.of_node);
@@ -2910,7 +2881,7 @@ static int dwceqos_probe(struct platform_device *pdev)
ret = of_get_phy_mode(lp->pdev->dev.of_node);
if (ret < 0) {
dev_err(&lp->pdev->dev, "error in getting phy i/f\n");
- goto err_out_unregister_clk_notifier;
+ goto err_out_clk_dis_phy;
}
lp->phy_interface = ret;
@@ -2918,14 +2889,14 @@ static int dwceqos_probe(struct platform_device *pdev)
ret = dwceqos_mii_init(lp);
if (ret) {
dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n");
- goto err_out_unregister_clk_notifier;
+ goto err_out_clk_dis_phy;
}
ret = dwceqos_mii_probe(ndev);
if (ret != 0) {
netdev_err(ndev, "mii_probe fail.\n");
ret = -ENXIO;
- goto err_out_unregister_clk_notifier;
+ goto err_out_clk_dis_phy;
}
dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
@@ -2934,7 +2905,8 @@ static int dwceqos_probe(struct platform_device *pdev)
(unsigned long)ndev);
tasklet_disable(&lp->tx_bdreclaim_tasklet);
- lp->txtimeout_handler_wq = create_singlethread_workqueue(DRIVER_NAME);
+ lp->txtimeout_handler_wq = alloc_workqueue(DRIVER_NAME,
+ WQ_MEM_RECLAIM, 0);
INIT_WORK(&lp->txtimeout_reinit, dwceqos_reinit_for_txtimeout);
platform_set_drvdata(pdev, ndev);
@@ -2942,7 +2914,7 @@ static int dwceqos_probe(struct platform_device *pdev)
if (ret) {
dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n",
ret);
- goto err_out_unregister_clk_notifier;
+ goto err_out_clk_dis_phy;
}
dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n",
pdev->id, ndev->base_addr, ndev->irq);
@@ -2952,18 +2924,24 @@ static int dwceqos_probe(struct platform_device *pdev)
if (ret) {
dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n",
ndev->irq, ret);
- goto err_out_unregister_clk_notifier;
+ goto err_out_clk_dis_phy;
}
if (netif_msg_probe(lp))
netdev_dbg(ndev, "net_local@%p\n", lp);
+ netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT);
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
+ goto err_out_clk_dis_phy;
+ }
+
return 0;
-err_out_unregister_clk_notifier:
+err_out_clk_dis_phy:
clk_disable_unprepare(lp->phy_ref_clk);
-err_out_unregister_netdev:
- unregister_netdev(ndev);
err_out_clk_dis_aper:
clk_disable_unprepare(lp->apb_pclk);
err_out_free_netdev:
@@ -2981,8 +2959,8 @@ static int dwceqos_remove(struct platform_device *pdev)
if (ndev) {
lp = netdev_priv(ndev);
- if (lp->phy_dev)
- phy_disconnect(lp->phy_dev);
+ if (ndev->phydev)
+ phy_disconnect(ndev->phydev);
mdiobus_unregister(lp->mii_bus);
mdiobus_free(lp->mii_bus);
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 14c9d1baa85c..7108c68f16d3 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1610,7 +1610,6 @@ static inline int bdx_tx_space(struct bdx_priv *priv)
* o NETDEV_TX_BUSY Cannot transmit packet, try later
* Usually a bug, means queue start/stop flow control is broken in
* the driver. Note: the driver must NOT put the skb in its DMA ring.
- * o NETDEV_TX_LOCKED Locking failed, please retry quickly.
*/
static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
struct net_device *ndev)
@@ -1630,12 +1629,7 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
ENTER;
local_irq_save(flags);
- if (!spin_trylock(&priv->tx_lock)) {
- local_irq_restore(flags);
- DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n",
- BDX_DRV_NAME, ndev->name);
- return NETDEV_TX_LOCKED;
- }
+ spin_lock(&priv->tx_lock);
/* build tx descriptor */
BDX_ASSERT(f->m.wptr >= f->m.memsz); /* started with valid wptr */
@@ -1707,7 +1701,7 @@ static netdev_tx_t bdx_tx_transmit(struct sk_buff *skb,
#endif
#ifdef BDX_LLTX
- ndev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
+ netif_trans_update(ndev); /* NETIF_F_LLTX driver :( */
#endif
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += skb->len;
@@ -1993,7 +1987,7 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if ((readl(nic->regs + FPGA_VER) & 0xFFF) >= 378) {
err = pci_enable_msi(pdev);
if (err)
- pr_err("Can't eneble msi. error is %d\n", err);
+ pr_err("Can't enable msi. error is %d\n", err);
else
nic->irq_type = IRQ_MSI;
} else
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index e7f0b7d95b65..9904d740d528 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -48,8 +48,7 @@ config TI_DAVINCI_CPDMA
will be called davinci_cpdma. This is recommended.
config TI_CPSW_PHY_SEL
- bool "TI CPSW Switch Phy sel Support"
- depends on TI_CPSW
+ bool
---help---
This driver supports configuring of the phy mode connected to
the CPSW.
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 7eef45e6d70a..d300d536d06f 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -205,7 +205,6 @@ struct cpmac_priv {
dma_addr_t dma_ring;
void __iomem *regs;
struct mii_bus *mii_bus;
- struct phy_device *phy;
char phy_name[MII_BUS_ID_SIZE + 3];
int oldlink, oldspeed, oldduplex;
u32 msg_enable;
@@ -830,37 +829,12 @@ static void cpmac_tx_timeout(struct net_device *dev)
static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
- struct cpmac_priv *priv = netdev_priv(dev);
-
if (!(netif_running(dev)))
return -EINVAL;
- if (!priv->phy)
+ if (!dev->phydev)
return -EINVAL;
- return phy_mii_ioctl(priv->phy, ifr, cmd);
-}
-
-static int cpmac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct cpmac_priv *priv = netdev_priv(dev);
-
- if (priv->phy)
- return phy_ethtool_gset(priv->phy, cmd);
-
- return -EINVAL;
-}
-
-static int cpmac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct cpmac_priv *priv = netdev_priv(dev);
-
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- if (priv->phy)
- return phy_ethtool_sset(priv->phy, cmd);
-
- return -EINVAL;
+ return phy_mii_ioctl(dev->phydev, ifr, cmd);
}
static void cpmac_get_ringparam(struct net_device *dev,
@@ -900,12 +874,12 @@ static void cpmac_get_drvinfo(struct net_device *dev,
}
static const struct ethtool_ops cpmac_ethtool_ops = {
- .get_settings = cpmac_get_settings,
- .set_settings = cpmac_set_settings,
.get_drvinfo = cpmac_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = cpmac_get_ringparam,
.set_ringparam = cpmac_set_ringparam,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static void cpmac_adjust_link(struct net_device *dev)
@@ -914,16 +888,16 @@ static void cpmac_adjust_link(struct net_device *dev)
int new_state = 0;
spin_lock(&priv->lock);
- if (priv->phy->link) {
+ if (dev->phydev->link) {
netif_tx_start_all_queues(dev);
- if (priv->phy->duplex != priv->oldduplex) {
+ if (dev->phydev->duplex != priv->oldduplex) {
new_state = 1;
- priv->oldduplex = priv->phy->duplex;
+ priv->oldduplex = dev->phydev->duplex;
}
- if (priv->phy->speed != priv->oldspeed) {
+ if (dev->phydev->speed != priv->oldspeed) {
new_state = 1;
- priv->oldspeed = priv->phy->speed;
+ priv->oldspeed = dev->phydev->speed;
}
if (!priv->oldlink) {
@@ -938,7 +912,7 @@ static void cpmac_adjust_link(struct net_device *dev)
}
if (new_state && netif_msg_link(priv) && net_ratelimit())
- phy_print_status(priv->phy);
+ phy_print_status(dev->phydev);
spin_unlock(&priv->lock);
}
@@ -1016,8 +990,8 @@ static int cpmac_open(struct net_device *dev)
cpmac_hw_start(dev);
napi_enable(&priv->napi);
- priv->phy->state = PHY_CHANGELINK;
- phy_start(priv->phy);
+ dev->phydev->state = PHY_CHANGELINK;
+ phy_start(dev->phydev);
return 0;
@@ -1032,8 +1006,10 @@ fail_desc:
kfree_skb(priv->rx_head[i].skb);
}
}
+ dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) * size,
+ priv->desc_ring, priv->dma_ring);
+
fail_alloc:
- kfree(priv->desc_ring);
iounmap(priv->regs);
fail_remap:
@@ -1053,7 +1029,7 @@ static int cpmac_stop(struct net_device *dev)
cancel_work_sync(&priv->reset_work);
napi_disable(&priv->napi);
- phy_stop(priv->phy);
+ phy_stop(dev->phydev);
cpmac_hw_stop(dev);
@@ -1106,6 +1082,7 @@ static int cpmac_probe(struct platform_device *pdev)
struct cpmac_priv *priv;
struct net_device *dev;
struct plat_cpmac_data *pdata;
+ struct phy_device *phydev = NULL;
pdata = dev_get_platdata(&pdev->dev);
@@ -1142,7 +1119,7 @@ static int cpmac_probe(struct platform_device *pdev)
mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
if (!mem) {
rc = -ENODEV;
- goto out;
+ goto fail;
}
dev->irq = platform_get_irq_byname(pdev, "irq");
@@ -1162,15 +1139,15 @@ static int cpmac_probe(struct platform_device *pdev)
snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT,
mdio_bus_id, phy_id);
- priv->phy = phy_connect(dev, priv->phy_name, cpmac_adjust_link,
- PHY_INTERFACE_MODE_MII);
+ phydev = phy_connect(dev, priv->phy_name, cpmac_adjust_link,
+ PHY_INTERFACE_MODE_MII);
- if (IS_ERR(priv->phy)) {
+ if (IS_ERR(phydev)) {
if (netif_msg_drv(priv))
dev_err(&pdev->dev, "Could not attach to PHY\n");
- rc = PTR_ERR(priv->phy);
- goto out;
+ rc = PTR_ERR(phydev);
+ goto fail;
}
rc = register_netdev(dev);
@@ -1189,7 +1166,6 @@ static int cpmac_probe(struct platform_device *pdev)
fail:
free_netdev(dev);
-out:
return rc;
}
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index e2fcdf1eec44..f85d605e4560 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -364,7 +364,6 @@ static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset)
}
struct cpsw_priv {
- spinlock_t lock;
struct platform_device *pdev;
struct net_device *ndev;
struct napi_struct napi_rx;
@@ -380,7 +379,6 @@ struct cpsw_priv {
u32 coal_intvl;
u32 bus_freq_mhz;
int rx_packet_max;
- int host_port;
struct clk *clk;
u8 mac_addr[ETH_ALEN];
struct cpsw_slave *slaves;
@@ -530,21 +528,18 @@ static const struct cpsw_stats cpsw_gstrings_stats[] = {
int slave_port = cpsw_get_slave_port(priv, \
slave->slave_num); \
cpsw_ale_add_mcast(priv->ale, addr, \
- 1 << slave_port | 1 << priv->host_port, \
+ 1 << slave_port | ALE_PORT_HOST, \
ALE_VLAN, slave->port_vlan, 0); \
} else { \
cpsw_ale_add_mcast(priv->ale, addr, \
- ALE_ALL_PORTS << priv->host_port, \
+ ALE_ALL_PORTS, \
0, 0, 0); \
} \
} while (0)
static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
{
- if (priv->host_port == 0)
- return slave_num + 1;
- else
- return slave_num;
+ return slave_num + 1;
}
static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
@@ -601,8 +596,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
/* Clear all mcast from ALE */
- cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS <<
- priv->host_port, -1);
+ cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1);
/* Flood All Unicast Packets to Host port */
cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
@@ -647,8 +641,7 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI);
/* Clear all mcast from ALE */
- cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port,
- vid);
+ cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS, vid);
if (!netdev_mc_empty(ndev)) {
struct netdev_hw_addr *ha;
@@ -741,6 +734,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
netif_receive_skb(skb);
ndev->stats.rx_bytes += len;
ndev->stats.rx_packets++;
+ kmemleak_not_leak(new_skb);
} else {
ndev->stats.rx_dropped++;
new_skb = skb;
@@ -1091,7 +1085,7 @@ static inline void cpsw_add_dual_emac_def_ale_entries(
struct cpsw_priv *priv, struct cpsw_slave *slave,
u32 slave_port)
{
- u32 port_mask = 1 << slave_port | 1 << priv->host_port;
+ u32 port_mask = 1 << slave_port | ALE_PORT_HOST;
if (priv->version == CPSW_VERSION_1)
slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN);
@@ -1102,7 +1096,7 @@ static inline void cpsw_add_dual_emac_def_ale_entries(
cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
port_mask, ALE_VLAN, slave->port_vlan, 0);
cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
- priv->host_port, ALE_VLAN | ALE_SECURE, slave->port_vlan);
+ HOST_PORT_NUM, ALE_VLAN | ALE_SECURE, slave->port_vlan);
}
static void soft_reset_slave(struct cpsw_slave *slave)
@@ -1180,7 +1174,6 @@ static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
{
const int vlan = priv->data.default_vlan;
- const int port = priv->host_port;
u32 reg;
int i;
int unreg_mcast_mask;
@@ -1198,9 +1191,9 @@ static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
else
unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
- cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port,
- ALE_ALL_PORTS << port, ALE_ALL_PORTS << port,
- unreg_mcast_mask << port);
+ cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS,
+ ALE_ALL_PORTS, ALE_ALL_PORTS,
+ unreg_mcast_mask);
}
static void cpsw_init_host_port(struct cpsw_priv *priv)
@@ -1213,7 +1206,7 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
cpsw_ale_start(priv->ale);
/* switch to vlan unaware mode */
- cpsw_ale_control_set(priv->ale, priv->host_port, ALE_VLAN_AWARE,
+ cpsw_ale_control_set(priv->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
CPSW_ALE_VLAN_AWARE);
control_reg = readl(&priv->regs->control);
control_reg |= CPSW_VLAN_AWARE;
@@ -1227,14 +1220,14 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
&priv->host_port_regs->cpdma_tx_pri_map);
__raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);
- cpsw_ale_control_set(priv->ale, priv->host_port,
+ cpsw_ale_control_set(priv->ale, HOST_PORT_NUM,
ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
if (!priv->data.dual_emac) {
- cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port,
+ cpsw_ale_add_ucast(priv->ale, priv->mac_addr, HOST_PORT_NUM,
0, 0);
cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
- 1 << priv->host_port, 0, 0, ALE_MCAST_FWD_2);
+ ALE_PORT_HOST, 0, 0, ALE_MCAST_FWD_2);
}
}
@@ -1251,6 +1244,7 @@ static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv)
slave->phy = NULL;
cpsw_ale_control_set(priv->ale, slave_port,
ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
+ soft_reset_slave(slave);
}
static int cpsw_ndo_open(struct net_device *ndev)
@@ -1259,7 +1253,11 @@ static int cpsw_ndo_open(struct net_device *ndev)
int i, ret;
u32 reg;
- pm_runtime_get_sync(&priv->pdev->dev);
+ ret = pm_runtime_get_sync(&priv->pdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&priv->pdev->dev);
+ return ret;
+ }
if (!cpsw_common_res_usage_state(priv))
cpsw_intr_disable(priv);
@@ -1281,11 +1279,11 @@ static int cpsw_ndo_open(struct net_device *ndev)
cpsw_add_default_vlan(priv);
else
cpsw_ale_add_vlan(priv->ale, priv->data.default_vlan,
- ALE_ALL_PORTS << priv->host_port,
- ALE_ALL_PORTS << priv->host_port, 0, 0);
+ ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);
if (!cpsw_common_res_usage_state(priv)) {
struct cpsw_priv *priv_sl0 = cpsw_get_slave_priv(priv, 0);
+ int buf_num;
/* setup tx dma to fixed prio and zero offset */
cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1);
@@ -1313,10 +1311,8 @@ static int cpsw_ndo_open(struct net_device *ndev)
enable_irq(priv->irqs_table[0]);
}
- if (WARN_ON(!priv->data.rx_descs))
- priv->data.rx_descs = 128;
-
- for (i = 0; i < priv->data.rx_descs; i++) {
+ buf_num = cpdma_chan_get_rx_buf_num(priv->dma);
+ for (i = 0; i < buf_num; i++) {
struct sk_buff *skb;
ret = -ENOMEM;
@@ -1330,6 +1326,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
kfree_skb(skb);
goto err_cleanup;
}
+ kmemleak_not_leak(skb);
}
/* continue even if we didn't manage to submit all
* receive descs
@@ -1347,7 +1344,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
if (priv->coal_intvl != 0) {
struct ethtool_coalesce coal;
- coal.rx_coalesce_usecs = (priv->coal_intvl << 4);
+ coal.rx_coalesce_usecs = priv->coal_intvl;
cpsw_set_coalesce(ndev, &coal);
}
@@ -1397,7 +1394,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
struct cpsw_priv *priv = netdev_priv(ndev);
int ret;
- ndev->trans_start = jiffies;
+ netif_trans_update(ndev);
if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
cpsw_err(priv, tx_err, "packet pad failed\n");
@@ -1619,24 +1616,33 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
struct sockaddr *addr = (struct sockaddr *)p;
int flags = 0;
u16 vid = 0;
+ int ret;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
+ ret = pm_runtime_get_sync(&priv->pdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&priv->pdev->dev);
+ return ret;
+ }
+
if (priv->data.dual_emac) {
vid = priv->slaves[priv->emac_port].port_vlan;
flags = ALE_VLAN;
}
- cpsw_ale_del_ucast(priv->ale, priv->mac_addr, priv->host_port,
+ cpsw_ale_del_ucast(priv->ale, priv->mac_addr, HOST_PORT_NUM,
flags, vid);
- cpsw_ale_add_ucast(priv->ale, addr->sa_data, priv->host_port,
+ cpsw_ale_add_ucast(priv->ale, addr->sa_data, HOST_PORT_NUM,
flags, vid);
memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
for_each_slave(priv, cpsw_set_slave_mac, priv);
+ pm_runtime_put(&priv->pdev->dev);
+
return 0;
}
@@ -1674,12 +1680,12 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
}
ret = cpsw_ale_add_vlan(priv->ale, vid, port_mask, 0, port_mask,
- unreg_mcast_mask << priv->host_port);
+ unreg_mcast_mask);
if (ret != 0)
return ret;
ret = cpsw_ale_add_ucast(priv->ale, priv->mac_addr,
- priv->host_port, ALE_VLAN, vid);
+ HOST_PORT_NUM, ALE_VLAN, vid);
if (ret != 0)
goto clean_vid;
@@ -1691,7 +1697,7 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
clean_vlan_ucast:
cpsw_ale_del_ucast(priv->ale, priv->mac_addr,
- priv->host_port, ALE_VLAN, vid);
+ HOST_PORT_NUM, ALE_VLAN, vid);
clean_vid:
cpsw_ale_del_vlan(priv->ale, vid, 0);
return ret;
@@ -1701,10 +1707,17 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
__be16 proto, u16 vid)
{
struct cpsw_priv *priv = netdev_priv(ndev);
+ int ret;
if (vid == priv->data.default_vlan)
return 0;
+ ret = pm_runtime_get_sync(&priv->pdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&priv->pdev->dev);
+ return ret;
+ }
+
if (priv->data.dual_emac) {
/* In dual EMAC, reserved VLAN id should not be used for
* creating VLAN interfaces as this can break the dual
@@ -1719,7 +1732,10 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
}
dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
- return cpsw_add_vlan_ale_entry(priv, vid);
+ ret = cpsw_add_vlan_ale_entry(priv, vid);
+
+ pm_runtime_put(&priv->pdev->dev);
+ return ret;
}
static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
@@ -1731,6 +1747,12 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
if (vid == priv->data.default_vlan)
return 0;
+ ret = pm_runtime_get_sync(&priv->pdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&priv->pdev->dev);
+ return ret;
+ }
+
if (priv->data.dual_emac) {
int i;
@@ -1746,12 +1768,14 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
return ret;
ret = cpsw_ale_del_ucast(priv->ale, priv->mac_addr,
- priv->host_port, ALE_VLAN, vid);
+ HOST_PORT_NUM, ALE_VLAN, vid);
if (ret != 0)
return ret;
- return cpsw_ale_del_mcast(priv->ale, priv->ndev->broadcast,
- 0, ALE_VLAN, vid);
+ ret = cpsw_ale_del_mcast(priv->ale, priv->ndev->broadcast,
+ 0, ALE_VLAN, vid);
+ pm_runtime_put(&priv->pdev->dev);
+ return ret;
}
static const struct net_device_ops cpsw_netdev_ops = {
@@ -1910,10 +1934,33 @@ static int cpsw_set_pauseparam(struct net_device *ndev,
priv->tx_pause = pause->tx_pause ? true : false;
for_each_slave(priv, _cpsw_adjust_link, priv, &link);
-
return 0;
}
+static int cpsw_ethtool_op_begin(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = pm_runtime_get_sync(&priv->pdev->dev);
+ if (ret < 0) {
+ cpsw_err(priv, drv, "ethtool begin failed %d\n", ret);
+ pm_runtime_put_noidle(&priv->pdev->dev);
+ }
+
+ return ret;
+}
+
+static void cpsw_ethtool_op_complete(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = pm_runtime_put(&priv->pdev->dev);
+ if (ret < 0)
+ cpsw_err(priv, drv, "ethtool complete failed %d\n", ret);
+}
+
static const struct ethtool_ops cpsw_ethtool_ops = {
.get_drvinfo = cpsw_get_drvinfo,
.get_msglevel = cpsw_get_msglevel,
@@ -1933,6 +1980,8 @@ static const struct ethtool_ops cpsw_ethtool_ops = {
.set_wol = cpsw_set_wol,
.get_regs_len = cpsw_get_regs_len,
.get_regs = cpsw_get_regs,
+ .begin = cpsw_ethtool_op_begin,
+ .complete = cpsw_ethtool_op_complete,
};
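Why one begin/complete pair suffices: the ethtool core invokes these optional callbacks around every operation. A hedged sketch of that dispatch (the real code is in net/core/ethtool.c and is more involved):

    #include <linux/ethtool.h>
    #include <linux/netdevice.h>

    static int example_ethtool_dispatch(struct net_device *ndev,
    				    int (*op)(struct net_device *))
    {
    	const struct ethtool_ops *ops = ndev->ethtool_ops;
    	int ret;

    	if (ops->begin) {
    		ret = ops->begin(ndev);	/* cpsw: runtime-resume */
    		if (ret < 0)
    			return ret;
    	}
    	ret = op(ndev);			/* the actual get/set handler */
    	if (ops->complete)
    		ops->complete(ndev);	/* cpsw: runtime-put */
    	return ret;
    }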
static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
@@ -2007,12 +2056,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
}
data->bd_ram_size = prop;
- if (of_property_read_u32(node, "rx_descs", &prop)) {
- dev_err(&pdev->dev, "Missing rx_descs property in the DT.\n");
- return -EINVAL;
- }
- data->rx_descs = prop;
-
if (of_property_read_u32(node, "mac_control", &prop)) {
dev_err(&pdev->dev, "Missing mac_control property in the DT.\n");
return -EINVAL;
@@ -2030,7 +2073,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
if (ret)
dev_warn(&pdev->dev, "Doesn't have any child node\n");
- for_each_child_of_node(node, slave_node) {
+ for_each_available_child_of_node(node, slave_node) {
struct cpsw_slave_data *slave_data = data->slave_data + i;
const void *mac_addr = NULL;
int lenp;
@@ -2132,7 +2175,6 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
}
priv_sl2 = netdev_priv(ndev);
- spin_lock_init(&priv_sl2->lock);
priv_sl2->data = *data;
priv_sl2->pdev = pdev;
priv_sl2->ndev = ndev;
@@ -2157,7 +2199,6 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
priv_sl2->bus_freq_mhz = priv->bus_freq_mhz;
priv_sl2->regs = priv->regs;
- priv_sl2->host_port = priv->host_port;
priv_sl2->host_port_regs = priv->host_port_regs;
priv_sl2->wr_regs = priv->wr_regs;
priv_sl2->hw_stats = priv->hw_stats;
@@ -2252,7 +2293,6 @@ static int cpsw_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
priv = netdev_priv(ndev);
- spin_lock_init(&priv->lock);
priv->pdev = pdev;
priv->ndev = ndev;
priv->dev = &ndev->dev;
@@ -2326,12 +2366,15 @@ static int cpsw_probe(struct platform_device *pdev)
goto clean_runtime_disable_ret;
}
priv->regs = ss_regs;
- priv->host_port = HOST_PORT_NUM;
/* Need to enable clocks with runtime PM api to access module
* registers
*/
- pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
+ goto clean_runtime_disable_ret;
+ }
priv->version = readl(&priv->regs->id_ver);
pm_runtime_put_sync(&pdev->dev);
@@ -2515,8 +2558,6 @@ static int cpsw_probe(struct platform_device *pdev)
clean_ale_ret:
cpsw_ale_destroy(priv->ale);
clean_dma_ret:
- cpdma_chan_destroy(priv->txch);
- cpdma_chan_destroy(priv->rxch);
cpdma_ctlr_destroy(priv->dma);
clean_runtime_disable_ret:
pm_runtime_disable(&pdev->dev);
@@ -2525,30 +2566,27 @@ clean_ndev_ret:
return ret;
}
-static int cpsw_remove_child_device(struct device *dev, void *c)
-{
- struct platform_device *pdev = to_platform_device(dev);
-
- of_device_unregister(pdev);
-
- return 0;
-}
-
static int cpsw_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
struct cpsw_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
+ return ret;
+ }
if (priv->data.dual_emac)
unregister_netdev(cpsw_get_slave_ndev(priv, 1));
unregister_netdev(ndev);
cpsw_ale_destroy(priv->ale);
- cpdma_chan_destroy(priv->txch);
- cpdma_chan_destroy(priv->rxch);
cpdma_ctlr_destroy(priv->dma);
+ of_platform_depopulate(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- device_for_each_child(&pdev->dev, NULL, cpsw_remove_child_device);
if (priv->data.dual_emac)
free_netdev(cpsw_get_slave_ndev(priv, 1));
free_netdev(ndev);
@@ -2568,16 +2606,12 @@ static int cpsw_suspend(struct device *dev)
for (i = 0; i < priv->data.slaves; i++) {
if (netif_running(priv->slaves[i].ndev))
cpsw_ndo_stop(priv->slaves[i].ndev);
- soft_reset_slave(priv->slaves + i);
}
} else {
if (netif_running(ndev))
cpsw_ndo_stop(ndev);
- for_each_slave(priv, soft_reset_slave);
}
- pm_runtime_put_sync(&pdev->dev);
-
/* Select sleep pin state */
pinctrl_pm_select_sleep_state(&pdev->dev);
@@ -2590,8 +2624,6 @@ static int cpsw_resume(struct device *dev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct cpsw_priv *priv = netdev_priv(ndev);
- pm_runtime_get_sync(&pdev->dev);
-
/* Select default pin state */
pinctrl_pm_select_default_state(&pdev->dev);
diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h
index e50afd1b2eda..16b54c6f32c2 100644
--- a/drivers/net/ethernet/ti/cpsw.h
+++ b/drivers/net/ethernet/ti/cpsw.h
@@ -35,7 +35,6 @@ struct cpsw_platform_data {
u32 cpts_clock_shift; /* convert input clock ticks to nanoseconds */
u32 ale_entries; /* ale table size */
u32 bd_ram_size; /*buffer descriptor ram size */
- u32 rx_descs; /* Number of Rx Descriptios */
u32 mac_control; /* Mac control register */
u16 default_vlan; /* Def VLAN for ALE lookup in VLAN aware mode*/
bool dual_emac; /* Enable Dual EMAC mode */
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 18bf3a8fdc50..19e5f32a8a64 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -21,7 +21,7 @@
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/delay.h>
-
+#include <linux/genalloc.h>
#include "davinci_cpdma.h"
/* DMA Registers */
@@ -87,9 +87,8 @@ struct cpdma_desc_pool {
void *cpumap; /* dma_alloc map */
int desc_size, mem_size;
int num_desc, used_desc;
- unsigned long *bitmap;
struct device *dev;
- spinlock_t lock;
+ struct gen_pool *gen_pool;
};
enum cpdma_state {
@@ -98,8 +97,6 @@ enum cpdma_state {
CPDMA_STATE_TEARDOWN,
};
-static const char *cpdma_state_str[] = { "idle", "active", "teardown" };
-
struct cpdma_ctlr {
enum cpdma_state state;
struct cpdma_params params;
@@ -117,6 +114,7 @@ struct cpdma_chan {
int chan_num;
spinlock_t lock;
int count;
+ u32 desc_num;
u32 mask;
cpdma_handler_fn handler;
enum dma_data_direction dir;
@@ -145,6 +143,19 @@ struct cpdma_chan {
(directed << CPDMA_TO_PORT_SHIFT)); \
} while (0)
+static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
+{
+ if (!pool)
+ return;
+
+ WARN_ON(pool->used_desc);
+ if (pool->cpumap)
+ dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
+ pool->phys);
+ else
+ iounmap(pool->iomap);
+}
+
/*
* Utility constructs for a cpdma descriptor pool. Some devices (e.g. davinci
* emac) have dedicated on-chip memory for these descriptors. Some other
@@ -155,24 +166,25 @@ static struct cpdma_desc_pool *
cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr,
int size, int align)
{
- int bitmap_size;
struct cpdma_desc_pool *pool;
+ int ret;
pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
if (!pool)
- goto fail;
-
- spin_lock_init(&pool->lock);
+ goto gen_pool_create_fail;
pool->dev = dev;
pool->mem_size = size;
pool->desc_size = ALIGN(sizeof(struct cpdma_desc), align);
pool->num_desc = size / pool->desc_size;
- bitmap_size = (pool->num_desc / BITS_PER_LONG) * sizeof(long);
- pool->bitmap = devm_kzalloc(dev, bitmap_size, GFP_KERNEL);
- if (!pool->bitmap)
- goto fail;
+ pool->gen_pool = devm_gen_pool_create(dev, ilog2(pool->desc_size), -1,
+ "cpdma");
+ if (IS_ERR(pool->gen_pool)) {
+ dev_err(dev, "pool create failed %ld\n",
+ PTR_ERR(pool->gen_pool));
+ goto gen_pool_create_fail;
+ }
if (phys) {
pool->phys = phys;
@@ -185,24 +197,22 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, dma_addr_t hw_addr,
pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
}
- if (pool->iomap)
- return pool;
-fail:
- return NULL;
-}
+ if (!pool->iomap)
+ goto gen_pool_create_fail;
-static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
-{
- if (!pool)
- return;
-
- WARN_ON(pool->used_desc);
- if (pool->cpumap) {
- dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
- pool->phys);
- } else {
- iounmap(pool->iomap);
+ ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
+ pool->phys, pool->mem_size, -1);
+ if (ret < 0) {
+ dev_err(dev, "pool add failed %d\n", ret);
+ goto gen_pool_add_virt_fail;
}
+
+ return pool;
+
+gen_pool_add_virt_fail:
+ cpdma_desc_pool_destroy(pool);
+gen_pool_create_fail:
+ return NULL;
}
static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
@@ -220,47 +230,23 @@ desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
}
static struct cpdma_desc __iomem *
-cpdma_desc_alloc(struct cpdma_desc_pool *pool, int num_desc, bool is_rx)
+cpdma_desc_alloc(struct cpdma_desc_pool *pool)
{
- unsigned long flags;
- int index;
- int desc_start;
- int desc_end;
struct cpdma_desc __iomem *desc = NULL;
- spin_lock_irqsave(&pool->lock, flags);
-
- if (is_rx) {
- desc_start = 0;
- desc_end = pool->num_desc/2;
- } else {
- desc_start = pool->num_desc/2;
- desc_end = pool->num_desc;
- }
-
- index = bitmap_find_next_zero_area(pool->bitmap,
- desc_end, desc_start, num_desc, 0);
- if (index < desc_end) {
- bitmap_set(pool->bitmap, index, num_desc);
- desc = pool->iomap + pool->desc_size * index;
+ desc = (struct cpdma_desc __iomem *)gen_pool_alloc(pool->gen_pool,
+ pool->desc_size);
+ if (desc)
pool->used_desc++;
- }
- spin_unlock_irqrestore(&pool->lock, flags);
return desc;
}
static void cpdma_desc_free(struct cpdma_desc_pool *pool,
struct cpdma_desc __iomem *desc, int num_desc)
{
- unsigned long flags, index;
-
- index = ((unsigned long)desc - (unsigned long)pool->iomap) /
- pool->desc_size;
- spin_lock_irqsave(&pool->lock, flags);
- bitmap_clear(pool->bitmap, index, num_desc);
+ gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
pool->used_desc--;
- spin_unlock_irqrestore(&pool->lock, flags);
}
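Taken together, the descriptor pool now follows the standard genalloc pattern; a minimal sketch under the same assumptions (fixed-size chunks carved from driver-managed backing memory; names are illustrative). gen_pool does its own locking, which is what lets the old pool spinlock and bitmap go away:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/genalloc.h>
    #include <linux/log2.h>

    static struct gen_pool *example_pool_init(struct device *dev,
    					  void *virt, phys_addr_t phys,
    					  size_t size, size_t chunk)
    {
    	struct gen_pool *pool;

    	/* one allocation order == one descriptor-sized chunk */
    	pool = devm_gen_pool_create(dev, ilog2(chunk), -1, "example");
    	if (IS_ERR(pool))
    		return pool;
    	if (gen_pool_add_virt(pool, (unsigned long)virt, phys, size, -1))
    		return ERR_PTR(-ENOMEM);
    	return pool;
    }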
struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
@@ -369,86 +355,13 @@ int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_stop);
-int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr)
-{
- struct device *dev = ctlr->dev;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&ctlr->lock, flags);
-
- dev_info(dev, "CPDMA: state: %s", cpdma_state_str[ctlr->state]);
-
- dev_info(dev, "CPDMA: txidver: %x",
- dma_reg_read(ctlr, CPDMA_TXIDVER));
- dev_info(dev, "CPDMA: txcontrol: %x",
- dma_reg_read(ctlr, CPDMA_TXCONTROL));
- dev_info(dev, "CPDMA: txteardown: %x",
- dma_reg_read(ctlr, CPDMA_TXTEARDOWN));
- dev_info(dev, "CPDMA: rxidver: %x",
- dma_reg_read(ctlr, CPDMA_RXIDVER));
- dev_info(dev, "CPDMA: rxcontrol: %x",
- dma_reg_read(ctlr, CPDMA_RXCONTROL));
- dev_info(dev, "CPDMA: softreset: %x",
- dma_reg_read(ctlr, CPDMA_SOFTRESET));
- dev_info(dev, "CPDMA: rxteardown: %x",
- dma_reg_read(ctlr, CPDMA_RXTEARDOWN));
- dev_info(dev, "CPDMA: txintstatraw: %x",
- dma_reg_read(ctlr, CPDMA_TXINTSTATRAW));
- dev_info(dev, "CPDMA: txintstatmasked: %x",
- dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED));
- dev_info(dev, "CPDMA: txintmaskset: %x",
- dma_reg_read(ctlr, CPDMA_TXINTMASKSET));
- dev_info(dev, "CPDMA: txintmaskclear: %x",
- dma_reg_read(ctlr, CPDMA_TXINTMASKCLEAR));
- dev_info(dev, "CPDMA: macinvector: %x",
- dma_reg_read(ctlr, CPDMA_MACINVECTOR));
- dev_info(dev, "CPDMA: maceoivector: %x",
- dma_reg_read(ctlr, CPDMA_MACEOIVECTOR));
- dev_info(dev, "CPDMA: rxintstatraw: %x",
- dma_reg_read(ctlr, CPDMA_RXINTSTATRAW));
- dev_info(dev, "CPDMA: rxintstatmasked: %x",
- dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED));
- dev_info(dev, "CPDMA: rxintmaskset: %x",
- dma_reg_read(ctlr, CPDMA_RXINTMASKSET));
- dev_info(dev, "CPDMA: rxintmaskclear: %x",
- dma_reg_read(ctlr, CPDMA_RXINTMASKCLEAR));
- dev_info(dev, "CPDMA: dmaintstatraw: %x",
- dma_reg_read(ctlr, CPDMA_DMAINTSTATRAW));
- dev_info(dev, "CPDMA: dmaintstatmasked: %x",
- dma_reg_read(ctlr, CPDMA_DMAINTSTATMASKED));
- dev_info(dev, "CPDMA: dmaintmaskset: %x",
- dma_reg_read(ctlr, CPDMA_DMAINTMASKSET));
- dev_info(dev, "CPDMA: dmaintmaskclear: %x",
- dma_reg_read(ctlr, CPDMA_DMAINTMASKCLEAR));
-
- if (!ctlr->params.has_ext_regs) {
- dev_info(dev, "CPDMA: dmacontrol: %x",
- dma_reg_read(ctlr, CPDMA_DMACONTROL));
- dev_info(dev, "CPDMA: dmastatus: %x",
- dma_reg_read(ctlr, CPDMA_DMASTATUS));
- dev_info(dev, "CPDMA: rxbuffofs: %x",
- dma_reg_read(ctlr, CPDMA_RXBUFFOFS));
- }
-
- for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
- if (ctlr->channels[i])
- cpdma_chan_dump(ctlr->channels[i]);
-
- spin_unlock_irqrestore(&ctlr->lock, flags);
- return 0;
-}
-EXPORT_SYMBOL_GPL(cpdma_ctlr_dump);
-
int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
{
- unsigned long flags;
int ret = 0, i;
if (!ctlr)
return -EINVAL;
- spin_lock_irqsave(&ctlr->lock, flags);
if (ctlr->state != CPDMA_STATE_IDLE)
cpdma_ctlr_stop(ctlr);
@@ -456,7 +369,6 @@ int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
cpdma_chan_destroy(ctlr->channels[i]);
cpdma_desc_pool_destroy(ctlr->pool);
- spin_unlock_irqrestore(&ctlr->lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);
@@ -516,6 +428,7 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
chan->state = CPDMA_STATE_IDLE;
chan->chan_num = chan_num;
chan->handler = handler;
+ chan->desc_num = ctlr->pool->num_desc / 2;
if (is_rx_chan(chan)) {
chan->hdp = ctlr->params.rxhdp + offset;
@@ -543,6 +456,12 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
}
EXPORT_SYMBOL_GPL(cpdma_chan_create);
+int cpdma_chan_get_rx_buf_num(struct cpdma_ctlr *ctlr)
+{
+ return ctlr->pool->num_desc / 2;
+}
+EXPORT_SYMBOL_GPL(cpdma_chan_get_rx_buf_num);
+
int cpdma_chan_destroy(struct cpdma_chan *chan)
{
struct cpdma_ctlr *ctlr;
@@ -574,54 +493,6 @@ int cpdma_chan_get_stats(struct cpdma_chan *chan,
}
EXPORT_SYMBOL_GPL(cpdma_chan_get_stats);
-int cpdma_chan_dump(struct cpdma_chan *chan)
-{
- unsigned long flags;
- struct device *dev = chan->ctlr->dev;
-
- spin_lock_irqsave(&chan->lock, flags);
-
- dev_info(dev, "channel %d (%s %d) state %s",
- chan->chan_num, is_rx_chan(chan) ? "rx" : "tx",
- chan_linear(chan), cpdma_state_str[chan->state]);
- dev_info(dev, "\thdp: %x\n", chan_read(chan, hdp));
- dev_info(dev, "\tcp: %x\n", chan_read(chan, cp));
- if (chan->rxfree) {
- dev_info(dev, "\trxfree: %x\n",
- chan_read(chan, rxfree));
- }
-
- dev_info(dev, "\tstats head_enqueue: %d\n",
- chan->stats.head_enqueue);
- dev_info(dev, "\tstats tail_enqueue: %d\n",
- chan->stats.tail_enqueue);
- dev_info(dev, "\tstats pad_enqueue: %d\n",
- chan->stats.pad_enqueue);
- dev_info(dev, "\tstats misqueued: %d\n",
- chan->stats.misqueued);
- dev_info(dev, "\tstats desc_alloc_fail: %d\n",
- chan->stats.desc_alloc_fail);
- dev_info(dev, "\tstats pad_alloc_fail: %d\n",
- chan->stats.pad_alloc_fail);
- dev_info(dev, "\tstats runt_receive_buff: %d\n",
- chan->stats.runt_receive_buff);
- dev_info(dev, "\tstats runt_transmit_buff: %d\n",
- chan->stats.runt_transmit_buff);
- dev_info(dev, "\tstats empty_dequeue: %d\n",
- chan->stats.empty_dequeue);
- dev_info(dev, "\tstats busy_dequeue: %d\n",
- chan->stats.busy_dequeue);
- dev_info(dev, "\tstats good_dequeue: %d\n",
- chan->stats.good_dequeue);
- dev_info(dev, "\tstats requeue: %d\n",
- chan->stats.requeue);
- dev_info(dev, "\tstats teardown_dequeue: %d\n",
- chan->stats.teardown_dequeue);
-
- spin_unlock_irqrestore(&chan->lock, flags);
- return 0;
-}
-
static void __cpdma_chan_submit(struct cpdma_chan *chan,
struct cpdma_desc __iomem *desc)
{
@@ -675,7 +546,13 @@ int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
goto unlock_ret;
}
- desc = cpdma_desc_alloc(ctlr->pool, 1, is_rx_chan(chan));
+ if (chan->count >= chan->desc_num) {
+ chan->stats.desc_alloc_fail++;
+ ret = -ENOMEM;
+ goto unlock_ret;
+ }
+
+ desc = cpdma_desc_alloc(ctlr->pool);
if (!desc) {
chan->stats.desc_alloc_fail++;
ret = -ENOMEM;
@@ -721,24 +598,16 @@ EXPORT_SYMBOL_GPL(cpdma_chan_submit);
bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
{
- unsigned long flags;
- int index;
- bool ret;
struct cpdma_ctlr *ctlr = chan->ctlr;
struct cpdma_desc_pool *pool = ctlr->pool;
+ bool free_tx_desc;
+ unsigned long flags;
- spin_lock_irqsave(&pool->lock, flags);
-
- index = bitmap_find_next_zero_area(pool->bitmap,
- pool->num_desc, pool->num_desc/2, 1, 0);
-
- if (index < pool->num_desc)
- ret = true;
- else
- ret = false;
-
- spin_unlock_irqrestore(&pool->lock, flags);
- return ret;
+ spin_lock_irqsave(&chan->lock, flags);
+ free_tx_desc = (chan->count < chan->desc_num) &&
+ gen_pool_avail(pool->gen_pool);
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return free_tx_desc;
}
EXPORT_SYMBOL_GPL(cpdma_check_free_tx_desc);
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h
index 86dee487f2f0..4b46cd6e9a3f 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.h
+++ b/drivers/net/ethernet/ti/davinci_cpdma.h
@@ -77,14 +77,13 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params);
int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr);
int cpdma_ctlr_start(struct cpdma_ctlr *ctlr);
int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr);
-int cpdma_ctlr_dump(struct cpdma_ctlr *ctlr);
struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
cpdma_handler_fn handler);
+int cpdma_chan_get_rx_buf_num(struct cpdma_ctlr *ctlr);
int cpdma_chan_destroy(struct cpdma_chan *chan);
int cpdma_chan_start(struct cpdma_chan *chan);
int cpdma_chan_stop(struct cpdma_chan *chan);
-int cpdma_chan_dump(struct cpdma_chan *chan);
int cpdma_chan_get_stats(struct cpdma_chan *chan,
struct cpdma_chan_stats *stats);
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index f56d66e6ec15..727a79f3c7dd 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -348,7 +348,6 @@ struct emac_priv {
u32 rx_addr_type;
const char *phy_id;
struct device_node *phy_node;
- struct phy_device *phydev;
spinlock_t lock;
/*platform specific members*/
void (*int_enable) (void);
@@ -380,97 +379,6 @@ static char *emac_rxhost_errcodes[16] = {
#define emac_ctrl_write(reg, val) iowrite32(val, (priv->ctrl_base + (reg)))
/**
- * emac_dump_regs - Dump important EMAC registers to debug terminal
- * @priv: The DaVinci EMAC private adapter structure
- *
- * Executes ethtool set cmd & sets phy mode
- *
- */
-static void emac_dump_regs(struct emac_priv *priv)
-{
- struct device *emac_dev = &priv->ndev->dev;
-
- /* Print important registers in EMAC */
- dev_info(emac_dev, "EMAC Basic registers\n");
- if (priv->version == EMAC_VERSION_1) {
- dev_info(emac_dev, "EMAC: EWCTL: %08X, EWINTTCNT: %08X\n",
- emac_ctrl_read(EMAC_CTRL_EWCTL),
- emac_ctrl_read(EMAC_CTRL_EWINTTCNT));
- }
- dev_info(emac_dev, "EMAC: EmuControl:%08X, FifoControl: %08X\n",
- emac_read(EMAC_EMCONTROL), emac_read(EMAC_FIFOCONTROL));
- dev_info(emac_dev, "EMAC: MBPEnable:%08X, RXUnicastSet: %08X, "\
- "RXMaxLen=%08X\n", emac_read(EMAC_RXMBPENABLE),
- emac_read(EMAC_RXUNICASTSET), emac_read(EMAC_RXMAXLEN));
- dev_info(emac_dev, "EMAC: MacControl:%08X, MacStatus: %08X, "\
- "MacConfig=%08X\n", emac_read(EMAC_MACCONTROL),
- emac_read(EMAC_MACSTATUS), emac_read(EMAC_MACCONFIG));
- dev_info(emac_dev, "EMAC Statistics\n");
- dev_info(emac_dev, "EMAC: rx_good_frames:%d\n",
- emac_read(EMAC_RXGOODFRAMES));
- dev_info(emac_dev, "EMAC: rx_broadcast_frames:%d\n",
- emac_read(EMAC_RXBCASTFRAMES));
- dev_info(emac_dev, "EMAC: rx_multicast_frames:%d\n",
- emac_read(EMAC_RXMCASTFRAMES));
- dev_info(emac_dev, "EMAC: rx_pause_frames:%d\n",
- emac_read(EMAC_RXPAUSEFRAMES));
- dev_info(emac_dev, "EMAC: rx_crcerrors:%d\n",
- emac_read(EMAC_RXCRCERRORS));
- dev_info(emac_dev, "EMAC: rx_align_code_errors:%d\n",
- emac_read(EMAC_RXALIGNCODEERRORS));
- dev_info(emac_dev, "EMAC: rx_oversized_frames:%d\n",
- emac_read(EMAC_RXOVERSIZED));
- dev_info(emac_dev, "EMAC: rx_jabber_frames:%d\n",
- emac_read(EMAC_RXJABBER));
- dev_info(emac_dev, "EMAC: rx_undersized_frames:%d\n",
- emac_read(EMAC_RXUNDERSIZED));
- dev_info(emac_dev, "EMAC: rx_fragments:%d\n",
- emac_read(EMAC_RXFRAGMENTS));
- dev_info(emac_dev, "EMAC: rx_filtered_frames:%d\n",
- emac_read(EMAC_RXFILTERED));
- dev_info(emac_dev, "EMAC: rx_qos_filtered_frames:%d\n",
- emac_read(EMAC_RXQOSFILTERED));
- dev_info(emac_dev, "EMAC: rx_octets:%d\n",
- emac_read(EMAC_RXOCTETS));
- dev_info(emac_dev, "EMAC: tx_goodframes:%d\n",
- emac_read(EMAC_TXGOODFRAMES));
- dev_info(emac_dev, "EMAC: tx_bcastframes:%d\n",
- emac_read(EMAC_TXBCASTFRAMES));
- dev_info(emac_dev, "EMAC: tx_mcastframes:%d\n",
- emac_read(EMAC_TXMCASTFRAMES));
- dev_info(emac_dev, "EMAC: tx_pause_frames:%d\n",
- emac_read(EMAC_TXPAUSEFRAMES));
- dev_info(emac_dev, "EMAC: tx_deferred_frames:%d\n",
- emac_read(EMAC_TXDEFERRED));
- dev_info(emac_dev, "EMAC: tx_collision_frames:%d\n",
- emac_read(EMAC_TXCOLLISION));
- dev_info(emac_dev, "EMAC: tx_single_coll_frames:%d\n",
- emac_read(EMAC_TXSINGLECOLL));
- dev_info(emac_dev, "EMAC: tx_mult_coll_frames:%d\n",
- emac_read(EMAC_TXMULTICOLL));
- dev_info(emac_dev, "EMAC: tx_excessive_collisions:%d\n",
- emac_read(EMAC_TXEXCESSIVECOLL));
- dev_info(emac_dev, "EMAC: tx_late_collisions:%d\n",
- emac_read(EMAC_TXLATECOLL));
- dev_info(emac_dev, "EMAC: tx_underrun:%d\n",
- emac_read(EMAC_TXUNDERRUN));
- dev_info(emac_dev, "EMAC: tx_carrier_sense_errors:%d\n",
- emac_read(EMAC_TXCARRIERSENSE));
- dev_info(emac_dev, "EMAC: tx_octets:%d\n",
- emac_read(EMAC_TXOCTETS));
- dev_info(emac_dev, "EMAC: net_octets:%d\n",
- emac_read(EMAC_NETOCTETS));
- dev_info(emac_dev, "EMAC: rx_sof_overruns:%d\n",
- emac_read(EMAC_RXSOFOVERRUNS));
- dev_info(emac_dev, "EMAC: rx_mof_overruns:%d\n",
- emac_read(EMAC_RXMOFOVERRUNS));
- dev_info(emac_dev, "EMAC: rx_dma_overruns:%d\n",
- emac_read(EMAC_RXDMAOVERRUNS));
-
- cpdma_ctlr_dump(priv->dma);
-}
-
-/**
* emac_get_drvinfo - Get EMAC driver information
* @ndev: The DaVinci EMAC network adapter
* @info: ethtool info structure containing name and version
@@ -486,43 +394,6 @@ static void emac_get_drvinfo(struct net_device *ndev,
}
/**
- * emac_get_settings - Get EMAC settings
- * @ndev: The DaVinci EMAC network adapter
- * @ecmd: ethtool command
- *
- * Executes ethool get command
- *
- */
-static int emac_get_settings(struct net_device *ndev,
- struct ethtool_cmd *ecmd)
-{
- struct emac_priv *priv = netdev_priv(ndev);
- if (priv->phydev)
- return phy_ethtool_gset(priv->phydev, ecmd);
- else
- return -EOPNOTSUPP;
-
-}
-
-/**
- * emac_set_settings - Set EMAC settings
- * @ndev: The DaVinci EMAC network adapter
- * @ecmd: ethtool command
- *
- * Executes ethool set command
- *
- */
-static int emac_set_settings(struct net_device *ndev, struct ethtool_cmd *ecmd)
-{
- struct emac_priv *priv = netdev_priv(ndev);
- if (priv->phydev)
- return phy_ethtool_sset(priv->phydev, ecmd);
- else
- return -EOPNOTSUPP;
-
-}
-
-/**
* emac_get_coalesce - Get interrupt coalesce settings for this device
* @ndev : The DaVinci EMAC network adapter
* @coal : ethtool coalesce settings structure
@@ -625,12 +496,12 @@ static int emac_set_coalesce(struct net_device *ndev,
*/
static const struct ethtool_ops ethtool_ops = {
.get_drvinfo = emac_get_drvinfo,
- .get_settings = emac_get_settings,
- .set_settings = emac_set_settings,
.get_link = ethtool_op_get_link,
.get_coalesce = emac_get_coalesce,
.set_coalesce = emac_set_coalesce,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
/**
@@ -651,8 +522,8 @@ static void emac_update_phystatus(struct emac_priv *priv)
mac_control = emac_read(EMAC_MACCONTROL);
cur_duplex = (mac_control & EMAC_MACCONTROL_FULLDUPLEXEN) ?
DUPLEX_FULL : DUPLEX_HALF;
- if (priv->phydev)
- new_duplex = priv->phydev->duplex;
+ if (ndev->phydev)
+ new_duplex = ndev->phydev->duplex;
else
new_duplex = DUPLEX_FULL;
@@ -1134,8 +1005,6 @@ static void emac_dev_tx_timeout(struct net_device *ndev)
if (netif_msg_tx_err(priv))
dev_err(emac_dev, "DaVinci EMAC: xmit timeout, restarting TX");
- emac_dump_regs(priv);
-
ndev->stats.tx_errors++;
emac_int_disable(priv);
cpdma_chan_stop(priv->txchan);
@@ -1454,7 +1323,7 @@ static void emac_poll_controller(struct net_device *ndev)
static void emac_adjust_link(struct net_device *ndev)
{
struct emac_priv *priv = netdev_priv(ndev);
- struct phy_device *phydev = priv->phydev;
+ struct phy_device *phydev = ndev->phydev;
unsigned long flags;
int new_state = 0;
@@ -1483,7 +1352,7 @@ static void emac_adjust_link(struct net_device *ndev)
}
if (new_state) {
emac_update_phystatus(priv);
- phy_print_status(priv->phydev);
+ phy_print_status(ndev->phydev);
}
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1505,15 +1374,13 @@ static void emac_adjust_link(struct net_device *ndev)
*/
static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
{
- struct emac_priv *priv = netdev_priv(ndev);
-
if (!(netif_running(ndev)))
return -EINVAL;
/* TODO: Add phy read and write and private statistics get feature */
- if (priv->phydev)
- return phy_mii_ioctl(priv->phydev, ifrq, cmd);
+ if (ndev->phydev)
+ return phy_mii_ioctl(ndev->phydev, ifrq, cmd);
else
return -EOPNOTSUPP;
}
@@ -1542,6 +1409,7 @@ static int emac_dev_open(struct net_device *ndev)
int res_num = 0, irq_num = 0;
int i = 0;
struct emac_priv *priv = netdev_priv(ndev);
+ struct phy_device *phydev = NULL;
ret = pm_runtime_get_sync(&priv->pdev->dev);
if (ret < 0) {
@@ -1607,12 +1475,10 @@ static int emac_dev_open(struct net_device *ndev)
cpdma_ctlr_start(priv->dma);
- priv->phydev = NULL;
-
if (priv->phy_node) {
- priv->phydev = of_phy_connect(ndev, priv->phy_node,
- &emac_adjust_link, 0, 0);
- if (!priv->phydev) {
+ phydev = of_phy_connect(ndev, priv->phy_node,
+ &emac_adjust_link, 0, 0);
+ if (!phydev) {
dev_err(emac_dev, "could not connect to phy %s\n",
priv->phy_node->full_name);
ret = -ENODEV;
@@ -1621,7 +1487,7 @@ static int emac_dev_open(struct net_device *ndev)
}
/* use the first phy on the bus if pdata did not give us a phy id */
- if (!priv->phydev && !priv->phy_id) {
+ if (!phydev && !priv->phy_id) {
struct device *phy;
phy = bus_find_device(&mdio_bus_type, NULL, NULL,
@@ -1630,16 +1496,15 @@ static int emac_dev_open(struct net_device *ndev)
priv->phy_id = dev_name(phy);
}
- if (!priv->phydev && priv->phy_id && *priv->phy_id) {
- priv->phydev = phy_connect(ndev, priv->phy_id,
- &emac_adjust_link,
- PHY_INTERFACE_MODE_MII);
+ if (!phydev && priv->phy_id && *priv->phy_id) {
+ phydev = phy_connect(ndev, priv->phy_id,
+ &emac_adjust_link,
+ PHY_INTERFACE_MODE_MII);
- if (IS_ERR(priv->phydev)) {
+ if (IS_ERR(phydev)) {
dev_err(emac_dev, "could not connect to phy %s\n",
priv->phy_id);
- ret = PTR_ERR(priv->phydev);
- priv->phydev = NULL;
+ ret = PTR_ERR(phydev);
goto err;
}
@@ -1647,10 +1512,10 @@ static int emac_dev_open(struct net_device *ndev)
priv->speed = 0;
priv->duplex = ~0;
- phy_attached_info(priv->phydev);
+ phy_attached_info(phydev);
}
- if (!priv->phydev) {
+ if (!phydev) {
/* No PHY , fix the link, speed and duplex settings */
dev_notice(emac_dev, "no phy, defaulting to 100/full\n");
priv->link = 1;
@@ -1659,14 +1524,11 @@ static int emac_dev_open(struct net_device *ndev)
emac_update_phystatus(priv);
}
- if (!netif_running(ndev)) /* debug only - to avoid compiler warning */
- emac_dump_regs(priv);
-
if (netif_msg_drv(priv))
dev_notice(emac_dev, "DaVinci EMAC: Opened %s\n", ndev->name);
- if (priv->phydev)
- phy_start(priv->phydev);
+ if (phydev)
+ phy_start(phydev);
return 0;
@@ -1717,8 +1579,8 @@ static int emac_dev_stop(struct net_device *ndev)
cpdma_ctlr_stop(priv->dma);
emac_write(EMAC_SOFTRESET, 1);
- if (priv->phydev)
- phy_disconnect(priv->phydev);
+ if (ndev->phydev)
+ phy_disconnect(ndev->phydev);
/* Free IRQ */
while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) {
@@ -2102,6 +1964,7 @@ static int davinci_emac_remove(struct platform_device *pdev)
cpdma_ctlr_destroy(priv->dma);
unregister_netdev(ndev);
+ of_node_put(priv->phy_node);
pm_runtime_disable(&pdev->dev);
free_netdev(ndev);
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 4e7c9b9b042a..33df340db1f1 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -53,6 +53,10 @@
#define DEF_OUT_FREQ 2200000 /* 2.2 MHz */
+struct davinci_mdio_of_param {
+ int autosuspend_delay_ms;
+};
+
struct davinci_mdio_regs {
u32 version;
u32 control;
@@ -90,19 +94,19 @@ static const struct mdio_platform_data default_pdata = {
struct davinci_mdio_data {
struct mdio_platform_data pdata;
struct davinci_mdio_regs __iomem *regs;
- spinlock_t lock;
struct clk *clk;
struct device *dev;
struct mii_bus *bus;
- bool suspended;
+ bool active_in_suspend;
unsigned long access_time; /* jiffies */
/* Indicates that driver shouldn't modify phy_mask in case
* if MDIO bus is registered from DT.
*/
bool skip_scan;
+ u32 clk_div;
};
-static void __davinci_mdio_reset(struct davinci_mdio_data *data)
+static void davinci_mdio_init_clk(struct davinci_mdio_data *data)
{
u32 mdio_in, div, mdio_out_khz, access_time;
@@ -111,9 +115,7 @@ static void __davinci_mdio_reset(struct davinci_mdio_data *data)
if (div > CONTROL_MAX_DIV)
div = CONTROL_MAX_DIV;
- /* set enable and clock divider */
- __raw_writel(div | CONTROL_ENABLE, &data->regs->control);
-
+ data->clk_div = div;
/*
* One mdio transaction consists of:
* 32 bits of preamble
@@ -134,12 +136,23 @@ static void __davinci_mdio_reset(struct davinci_mdio_data *data)
data->access_time = 1;
}
+static void davinci_mdio_enable(struct davinci_mdio_data *data)
+{
+ /* set enable and clock divider */
+ __raw_writel(data->clk_div | CONTROL_ENABLE, &data->regs->control);
+}
+
static int davinci_mdio_reset(struct mii_bus *bus)
{
struct davinci_mdio_data *data = bus->priv;
u32 phy_mask, ver;
+ int ret;
- __davinci_mdio_reset(data);
+ ret = pm_runtime_get_sync(data->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(data->dev);
+ return ret;
+ }
/* wait for scan logic to settle */
msleep(PHY_MAX_ADDR * data->access_time);
@@ -150,7 +163,7 @@ static int davinci_mdio_reset(struct mii_bus *bus)
(ver >> 8) & 0xff, ver & 0xff);
if (data->skip_scan)
- return 0;
+ goto done;
/* get phy mask from the alive register */
phy_mask = __raw_readl(&data->regs->alive);
@@ -165,6 +178,10 @@ static int davinci_mdio_reset(struct mii_bus *bus)
}
data->bus->phy_mask = phy_mask;
+done:
+ pm_runtime_mark_last_busy(data->dev);
+ pm_runtime_put_autosuspend(data->dev);
+
return 0;
}
@@ -190,7 +207,7 @@ static inline int wait_for_user_access(struct davinci_mdio_data *data)
* operation
*/
dev_warn(data->dev, "resetting idled controller\n");
- __davinci_mdio_reset(data);
+ davinci_mdio_enable(data);
return -EAGAIN;
}
@@ -225,11 +242,10 @@ static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
return -EINVAL;
- spin_lock(&data->lock);
-
- if (data->suspended) {
- spin_unlock(&data->lock);
- return -ENODEV;
+ ret = pm_runtime_get_sync(data->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(data->dev);
+ return ret;
}
reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
@@ -255,8 +271,8 @@ static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
break;
}
- spin_unlock(&data->lock);
-
+ pm_runtime_mark_last_busy(data->dev);
+ pm_runtime_put_autosuspend(data->dev);
return ret;
}
@@ -270,11 +286,10 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
return -EINVAL;
- spin_lock(&data->lock);
-
- if (data->suspended) {
- spin_unlock(&data->lock);
- return -ENODEV;
+ ret = pm_runtime_get_sync(data->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(data->dev);
+ return ret;
}
reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
@@ -295,9 +310,10 @@ static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
break;
}
- spin_unlock(&data->lock);
+ pm_runtime_mark_last_busy(data->dev);
+ pm_runtime_put_autosuspend(data->dev);
- return 0;
+ return ret;
}
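The read/write paths above and the probe changes below form one autosuspend scheme; a hedged sketch of both halves (delay value taken from the "ti,cpsw-mdio" match data introduced later in this patch):

    #include <linux/pm_runtime.h>

    static void example_autosuspend_setup(struct device *dev, int delay_ms)
    {
    	pm_runtime_set_autosuspend_delay(dev, delay_ms);	/* e.g. 100 */
    	pm_runtime_use_autosuspend(dev);
    	pm_runtime_enable(dev);
    }

    static int example_mdio_access(struct device *dev)
    {
    	int ret = pm_runtime_get_sync(dev);

    	if (ret < 0) {
    		pm_runtime_put_noidle(dev);
    		return ret;
    	}
    	/* ... perform the MDIO transaction ... */
    	pm_runtime_mark_last_busy(dev);		/* restart idle timer */
    	pm_runtime_put_autosuspend(dev);	/* may suspend after delay */
    	return 0;
    }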
#if IS_ENABLED(CONFIG_OF)
@@ -320,6 +336,19 @@ static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
}
#endif
+#if IS_ENABLED(CONFIG_OF)
+static const struct davinci_mdio_of_param of_cpsw_mdio_data = {
+ .autosuspend_delay_ms = 100,
+};
+
+static const struct of_device_id davinci_mdio_of_mtable[] = {
+ { .compatible = "ti,davinci_mdio", },
+ { .compatible = "ti,cpsw-mdio", .data = &of_cpsw_mdio_data},
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable);
+#endif
+
static int davinci_mdio_probe(struct platform_device *pdev)
{
struct mdio_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -328,6 +357,7 @@ static int davinci_mdio_probe(struct platform_device *pdev)
struct resource *res;
struct phy_device *phy;
int ret, addr;
+ int autosuspend_delay_ms = -1;
data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
@@ -340,9 +370,22 @@ static int davinci_mdio_probe(struct platform_device *pdev)
}
if (dev->of_node) {
- if (davinci_mdio_probe_dt(&data->pdata, pdev))
- data->pdata = default_pdata;
+ const struct of_device_id *of_id;
+
+ ret = davinci_mdio_probe_dt(&data->pdata, pdev);
+ if (ret)
+ return ret;
snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);
+
+ of_id = of_match_device(davinci_mdio_of_mtable, &pdev->dev);
+ if (of_id) {
+ const struct davinci_mdio_of_param *of_mdio_data;
+
+ of_mdio_data = of_id->data;
+ if (of_mdio_data)
+ autosuspend_delay_ms =
+ of_mdio_data->autosuspend_delay_ms;
+ }
} else {
data->pdata = pdata ? (*pdata) : default_pdata;
snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x",
@@ -356,26 +399,25 @@ static int davinci_mdio_probe(struct platform_device *pdev)
data->bus->parent = dev;
data->bus->priv = data;
- pm_runtime_enable(&pdev->dev);
- pm_runtime_get_sync(&pdev->dev);
data->clk = devm_clk_get(dev, "fck");
if (IS_ERR(data->clk)) {
dev_err(dev, "failed to get device clock\n");
- ret = PTR_ERR(data->clk);
- data->clk = NULL;
- goto bail_out;
+ return PTR_ERR(data->clk);
}
dev_set_drvdata(dev, data);
data->dev = dev;
- spin_lock_init(&data->lock);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
data->regs = devm_ioremap_resource(dev, res);
- if (IS_ERR(data->regs)) {
- ret = PTR_ERR(data->regs);
- goto bail_out;
- }
+ if (IS_ERR(data->regs))
+ return PTR_ERR(data->regs);
+
+ davinci_mdio_init_clk(data);
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, autosuspend_delay_ms);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
/* register the mii bus
* Create PHYs from DT only in case if PHY child nodes are explicitly
@@ -404,9 +446,8 @@ static int davinci_mdio_probe(struct platform_device *pdev)
return 0;
bail_out:
- pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
-
return ret;
}
@@ -417,29 +458,47 @@ static int davinci_mdio_remove(struct platform_device *pdev)
if (data->bus)
mdiobus_unregister(data->bus);
- pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int davinci_mdio_suspend(struct device *dev)
+#ifdef CONFIG_PM
+static int davinci_mdio_runtime_suspend(struct device *dev)
{
struct davinci_mdio_data *data = dev_get_drvdata(dev);
u32 ctrl;
- spin_lock(&data->lock);
-
/* shutdown the scan state machine */
ctrl = __raw_readl(&data->regs->control);
ctrl &= ~CONTROL_ENABLE;
__raw_writel(ctrl, &data->regs->control);
wait_for_idle(data);
- data->suspended = true;
- spin_unlock(&data->lock);
- pm_runtime_put_sync(data->dev);
+ return 0;
+}
+
+static int davinci_mdio_runtime_resume(struct device *dev)
+{
+ struct davinci_mdio_data *data = dev_get_drvdata(dev);
+
+ davinci_mdio_enable(data);
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int davinci_mdio_suspend(struct device *dev)
+{
+ struct davinci_mdio_data *data = dev_get_drvdata(dev);
+ int ret = 0;
+
+ data->active_in_suspend = !pm_runtime_status_suspended(dev);
+ if (data->active_in_suspend)
+ ret = pm_runtime_force_suspend(dev);
+ if (ret < 0)
+ return ret;
/* Select sleep pin state */
pinctrl_pm_select_sleep_state(dev);
@@ -454,31 +513,19 @@ static int davinci_mdio_resume(struct device *dev)
/* Select default pin state */
pinctrl_pm_select_default_state(dev);
- pm_runtime_get_sync(data->dev);
-
- spin_lock(&data->lock);
- /* restart the scan state machine */
- __davinci_mdio_reset(data);
-
- data->suspended = false;
- spin_unlock(&data->lock);
+ if (data->active_in_suspend)
+ pm_runtime_force_resume(dev);
return 0;
}
#endif
static const struct dev_pm_ops davinci_mdio_pm_ops = {
+ SET_RUNTIME_PM_OPS(davinci_mdio_runtime_suspend,
+ davinci_mdio_runtime_resume, NULL)
SET_LATE_SYSTEM_SLEEP_PM_OPS(davinci_mdio_suspend, davinci_mdio_resume)
};
-#if IS_ENABLED(CONFIG_OF)
-static const struct of_device_id davinci_mdio_of_mtable[] = {
- { .compatible = "ti,davinci_mdio", },
- { /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable);
-#endif
-
static struct platform_driver davinci_mdio_driver = {
.driver = {
.name = "davinci_mdio",
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 1d0942c53120..32516661f180 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -1272,7 +1272,7 @@ static int netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
if (ret)
goto drop;
- ndev->trans_start = jiffies;
+ netif_trans_update(ndev);
/* Check Tx pool count & stop subqueue if needed */
desc_count = knav_pool_count(netcp->tx_pool);
@@ -1788,7 +1788,7 @@ static void netcp_ndo_tx_timeout(struct net_device *ndev)
dev_err(netcp->ndev_dev, "transmit timed out tx descs(%d)\n", descs);
netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);
- ndev->trans_start = jiffies;
+ netif_trans_update(ndev);
netif_tx_wake_all_queues(ndev);
}
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index a274cd49afe9..ece0ea0f6b38 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -1007,7 +1007,7 @@ static void tlan_tx_timeout(struct net_device *dev)
tlan_reset_lists(dev);
tlan_read_and_clear_stats(dev, TLAN_IGNORE);
tlan_reset_adapter(dev);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
@@ -1651,7 +1651,6 @@ static u32 tlan_handle_tx_eoc(struct net_device *dev, u16 host_int)
dma_addr_t head_list_phys;
u32 ack = 1;
- host_int = 0;
if (priv->tlan_rev < 0x30) {
TLAN_DBG(TLAN_DEBUG_TX,
"TRANSMIT: handling TX EOC (Head=%d Tail=%d) -- IRQ\n",
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 0a15acc075b3..11213a38c795 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -462,7 +462,7 @@ static void tile_tx_timestamp(struct sk_buff *skb, int instance)
if (unlikely((shtx->tx_flags & SKBTX_HW_TSTAMP) != 0)) {
struct mpipe_data *md = &mpipe_data[instance];
struct skb_shared_hwtstamps shhwtstamps;
- struct timespec ts;
+ struct timespec64 ts;
shtx->tx_flags |= SKBTX_IN_PROGRESS;
gxio_mpipe_get_timestamp(&md->context, &ts);
@@ -886,9 +886,9 @@ static struct ptp_clock_info ptp_mpipe_caps = {
/* Sync mPIPE's timestamp up with Linux system time and register PTP clock. */
static void register_ptp_clock(struct net_device *dev, struct mpipe_data *md)
{
- struct timespec ts;
+ struct timespec64 ts;
- getnstimeofday(&ts);
+ ktime_get_ts64(&ts);
gxio_mpipe_set_timestamp(&md->context, &ts);
mutex_init(&md->ptp_lock);
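Both tilegx hunks are y2038 conversions; a hedged sketch of the pattern (note that ktime_get_ts64() reads the monotonic clock, whereas getnstimeofday() read wall-clock time):

    #include <linux/timekeeping.h>
    #include <linux/printk.h>

    static void example_stamp(void)
    {
    	struct timespec64 ts;	/* 64-bit tv_sec, safe past 2038 */

    	ktime_get_ts64(&ts);
    	pr_info("ts: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
    }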
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 298e059d0498..4ef605a90247 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -588,7 +588,7 @@ static bool tile_net_lepp_free_comps(struct net_device *dev, bool all)
static void tile_net_schedule_egress_timer(struct tile_net_cpu *info)
{
if (!info->egress_timer_scheduled) {
- mod_timer_pinned(&info->egress_timer, jiffies + 1);
+ mod_timer(&info->egress_timer, jiffies + 1);
info->egress_timer_scheduled = true;
}
}
@@ -1004,7 +1004,7 @@ static void tile_net_register(void *dev_ptr)
BUG();
/* Initialize the egress timer. */
- init_timer(&info->egress_timer);
+ init_timer_pinned(&info->egress_timer);
info->egress_timer.data = (long)info;
info->egress_timer.function = tile_net_handle_egress_timer;
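These two tilepro hunks are one migration: the pinned behavior moves from each mod_timer_pinned() call to a one-time init_timer_pinned(). A minimal sketch (handler name is illustrative):

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    static void example_handler(unsigned long data)
    {
    	/* timer callback; runs on the CPU that armed the timer */
    }

    static void example_arm(struct timer_list *t)
    {
    	init_timer_pinned(t);	/* declare pinning once, at init */
    	t->function = example_handler;
    	t->data = 0;
    	mod_timer(t, jiffies + 1);	/* plain re-arm from now on */
    }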
@@ -1883,7 +1883,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
/* Save the timestamp. */
- dev->trans_start = jiffies;
+ netif_trans_update(dev);
#ifdef TILE_NET_PARANOIA
@@ -2026,7 +2026,7 @@ static void tile_net_tx_timeout(struct net_device *dev)
{
PDEBUG("tile_net_tx_timeout()\n");
PDEBUG("Transmit timeout at %ld, latency %ld\n", jiffies,
- jiffies - dev->trans_start);
+ jiffies - dev_trans_start(dev));
/* XXX: ISSUE: This doesn't seem useful for us. */
netif_wake_queue(dev);
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
index 743b18266a7c..446ea580ad42 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
@@ -1616,13 +1616,13 @@ static void gelic_wl_scan_complete_event(struct gelic_wl_info *wl)
target->valid = 1;
target->eurus_index = i;
kfree(target->hwinfo);
- target->hwinfo = kzalloc(be16_to_cpu(scan_info->size),
+ target->hwinfo = kmemdup(scan_info,
+ be16_to_cpu(scan_info->size),
GFP_KERNEL);
if (!target->hwinfo)
continue;
/* copy hw scan info */
- memcpy(target->hwinfo, scan_info, be16_to_cpu(scan_info->size));
target->essid_len = strnlen(scan_info->essid,
sizeof(scan_info->essid));
target->rate_len = 0;
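For reference, an equivalent-behavior sketch of what kmemdup() replaces in this hunk; the old kzalloc() zeroing was redundant because the buffer was immediately overwritten in full:

    #include <linux/slab.h>
    #include <linux/string.h>

    static void *example_dup(const void *src, size_t len)
    {
    	void *p = kmalloc(len, GFP_KERNEL);	/* no need to zero */

    	if (p)
    		memcpy(p, src, len);
    	return p;
    }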
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 67610270d171..36a6e8b54d94 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -705,7 +705,7 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
wmb();
descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
- card->netdev->trans_start = jiffies; /* set netdev watchdog timer */
+ netif_trans_update(card->netdev); /* set netdev watchdog timer */
return 0;
}
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 54874783476a..5b01b3fa9fec 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -280,7 +280,7 @@ struct tc35815_regs {
* Descriptors
*/
-/* Frame descripter */
+/* Frame descriptor */
struct FDesc {
volatile __u32 FDNext;
volatile __u32 FDSystem;
@@ -288,7 +288,7 @@ struct FDesc {
volatile __u32 FDCtl;
};
-/* Buffer descripter */
+/* Buffer descriptor */
struct BDesc {
volatile __u32 BuffData;
volatile __u32 BDCtl;
@@ -296,7 +296,7 @@ struct BDesc {
#define FD_ALIGN 16
-/* Frame Descripter bit assign ---------------------------------------------- */
+/* Frame Descriptor bit assign ---------------------------------------------- */
#define FD_FDLength_MASK 0x0000FFFF /* Length MASK */
#define FD_BDCnt_MASK 0x001F0000 /* BD count MASK in FD */
#define FD_FrmOpt_MASK 0x7C000000 /* Frame option MASK */
@@ -309,7 +309,7 @@ struct BDesc {
#define FD_Next_EOL 0x00000001 /* FD EOL indicator */
#define FD_BDCnt_SHIFT 16
-/* Buffer Descripter bit assign --------------------------------------------- */
+/* Buffer Descriptor bit assign --------------------------------------------- */
#define BD_BuffLength_MASK 0x0000FFFF /* Receive Data Size */
#define BD_RxBDID_MASK 0x00FF0000 /* BD ID Number MASK */
#define BD_RxBDSeqN_MASK 0x7F000000 /* Rx BD Sequence Number */
@@ -405,7 +405,6 @@ struct tc35815_local {
spinlock_t rx_lock;
struct mii_bus *mii_bus;
- struct phy_device *phy_dev;
int duplex;
int speed;
int link;
@@ -539,7 +538,7 @@ static int tc_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 val)
static void tc_handle_link_change(struct net_device *dev)
{
struct tc35815_local *lp = netdev_priv(dev);
- struct phy_device *phydev = lp->phy_dev;
+ struct phy_device *phydev = dev->phydev;
unsigned long flags;
int status_change = 0;
@@ -645,7 +644,6 @@ static int tc_mii_probe(struct net_device *dev)
lp->link = 0;
lp->speed = 0;
lp->duplex = -1;
- lp->phy_dev = phydev;
return 0;
}
@@ -853,7 +851,7 @@ static void tc35815_remove_one(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct tc35815_local *lp = netdev_priv(dev);
- phy_disconnect(lp->phy_dev);
+ phy_disconnect(dev->phydev);
mdiobus_unregister(lp->mii_bus);
mdiobus_free(lp->mii_bus);
unregister_netdev(dev);
@@ -1143,8 +1141,8 @@ static void tc35815_restart(struct net_device *dev)
struct tc35815_local *lp = netdev_priv(dev);
int ret;
- if (lp->phy_dev) {
- ret = phy_init_hw(lp->phy_dev);
+ if (dev->phydev) {
+ ret = phy_init_hw(dev->phydev);
if (ret)
printk(KERN_ERR "%s: PHY init failed.\n", dev->name);
}
@@ -1236,7 +1234,7 @@ tc35815_open(struct net_device *dev)
netif_carrier_off(dev);
/* schedule a link state check */
- phy_start(lp->phy_dev);
+ phy_start(dev->phydev);
/* We are now ready to accept transmit requests from
* the queueing layer of the networking.
@@ -1387,7 +1385,7 @@ static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit)
if (status & Int_IntExBD) {
if (netif_msg_rx_err(lp))
dev_warn(&dev->dev,
- "Excessive Buffer Descriptiors (%#x).\n",
+ "Excessive Buffer Descriptors (%#x).\n",
status);
dev->stats.rx_length_errors++;
ret = 0;
@@ -1819,8 +1817,8 @@ tc35815_close(struct net_device *dev)
netif_stop_queue(dev);
napi_disable(&lp->napi);
- if (lp->phy_dev)
- phy_stop(lp->phy_dev);
+ if (dev->phydev)
+ phy_stop(dev->phydev);
cancel_work_sync(&lp->restart_work);
/* Flush the Tx and disable Rx here. */
@@ -1946,24 +1944,6 @@ static void tc35815_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *
strlcpy(info->bus_info, pci_name(lp->pci_dev), sizeof(info->bus_info));
}
-static int tc35815_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct tc35815_local *lp = netdev_priv(dev);
-
- if (!lp->phy_dev)
- return -ENODEV;
- return phy_ethtool_gset(lp->phy_dev, cmd);
-}
-
-static int tc35815_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct tc35815_local *lp = netdev_priv(dev);
-
- if (!lp->phy_dev)
- return -ENODEV;
- return phy_ethtool_sset(lp->phy_dev, cmd);
-}
-
static u32 tc35815_get_msglevel(struct net_device *dev)
{
struct tc35815_local *lp = netdev_priv(dev);
@@ -2013,25 +1993,23 @@ static void tc35815_get_strings(struct net_device *dev, u32 stringset, u8 *data)
static const struct ethtool_ops tc35815_ethtool_ops = {
.get_drvinfo = tc35815_get_drvinfo,
- .get_settings = tc35815_get_settings,
- .set_settings = tc35815_set_settings,
.get_link = ethtool_op_get_link,
.get_msglevel = tc35815_get_msglevel,
.set_msglevel = tc35815_set_msglevel,
.get_strings = tc35815_get_strings,
.get_sset_count = tc35815_get_sset_count,
.get_ethtool_stats = tc35815_get_ethtool_stats,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static int tc35815_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
- struct tc35815_local *lp = netdev_priv(dev);
-
if (!netif_running(dev))
return -EINVAL;
- if (!lp->phy_dev)
+ if (!dev->phydev)
return -ENODEV;
- return phy_mii_ioctl(lp->phy_dev, rq, cmd);
+ return phy_mii_ioctl(dev->phydev, rq, cmd);
}
static void tc35815_chip_reset(struct net_device *dev)
@@ -2116,7 +2094,7 @@ static void tc35815_chip_init(struct net_device *dev)
if (lp->chiptype == TC35815_TX4939)
txctl &= ~Tx_EnLCarr;
/* WORKAROUND: ignore LostCrS in full duplex operation */
- if (!lp->phy_dev || !lp->link || lp->duplex == DUPLEX_FULL)
+ if (!dev->phydev || !lp->link || lp->duplex == DUPLEX_FULL)
txctl &= ~Tx_EnLCarr;
tc_writel(txctl, &tr->Tx_Ctl);
}
@@ -2132,8 +2110,8 @@ static int tc35815_suspend(struct pci_dev *pdev, pm_message_t state)
if (!netif_running(dev))
return 0;
netif_device_detach(dev);
- if (lp->phy_dev)
- phy_stop(lp->phy_dev);
+ if (dev->phydev)
+ phy_stop(dev->phydev);
spin_lock_irqsave(&lp->lock, flags);
tc35815_chip_reset(dev);
spin_unlock_irqrestore(&lp->lock, flags);
@@ -2144,7 +2122,6 @@ static int tc35815_suspend(struct pci_dev *pdev, pm_message_t state)
static int tc35815_resume(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
- struct tc35815_local *lp = netdev_priv(dev);
pci_restore_state(pdev);
if (!netif_running(dev))
@@ -2152,8 +2129,8 @@ static int tc35815_resume(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D0);
tc35815_restart(dev);
netif_carrier_off(dev);
- if (lp->phy_dev)
- phy_start(lp->phy_dev);
+ if (dev->phydev)
+ phy_start(dev->phydev);
netif_device_attach(dev);
return 0;
}
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 520cf50a3d5a..8fd131207ee1 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -166,6 +166,7 @@ static struct platform_driver tsi_eth_driver = {
static void tsi108_timed_checker(unsigned long dev_ptr);
+#ifdef DEBUG
static void dump_eth_one(struct net_device *dev)
{
struct tsi108_prv_data *data = netdev_priv(dev);
@@ -190,6 +191,7 @@ static void dump_eth_one(struct net_device *dev)
TSI_READ(TSI108_EC_RXESTAT),
TSI_READ(TSI108_EC_RXERR), data->rxpending);
}
+#endif
/* Synchronization is needed between the thread and up/down events.
* Note that the PHY is accessed through the same registers for both
@@ -1314,7 +1316,8 @@ static int tsi108_open(struct net_device *dev)
data->txring = dma_zalloc_coherent(NULL, txring_size, &data->txdma,
GFP_KERNEL);
if (!data->txring) {
- pci_free_consistent(0, rxring_size, data->rxring, data->rxdma);
+ pci_free_consistent(NULL, rxring_size, data->rxring,
+ data->rxdma);
return -ENOMEM;
}
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 2b7550c43f78..9d14731cdcb1 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -1758,7 +1758,7 @@ static void rhine_reset_task(struct work_struct *work)
spin_unlock_bh(&rp->lock);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
dev->stats.tx_errors++;
netif_wake_queue(dev);
diff --git a/drivers/net/ethernet/wiznet/Kconfig b/drivers/net/ethernet/wiznet/Kconfig
index f98b91d21f33..1981e88c18dc 100644
--- a/drivers/net/ethernet/wiznet/Kconfig
+++ b/drivers/net/ethernet/wiznet/Kconfig
@@ -69,4 +69,18 @@ config WIZNET_BUS_ANY
Performance may decrease compared to explicitly selected bus mode.
endchoice
+config WIZNET_W5100_SPI
+ tristate "WIZnet W5100/W5200/W5500 Ethernet support for SPI mode"
+ depends on WIZNET_BUS_ANY && WIZNET_W5100
+ depends on SPI
+ ---help---
+ In SPI mode, the host system accesses registers using the SPI
+ protocol (mode 0) on the SPI bus.
+
+ Performance decreases compared to the other bus interface modes.
+ In W5100 SPI mode, burst READ/WRITE processing is not provided.
+
+ To compile this driver as a module, choose M here: the module
+ will be called w5100-spi.
+
endif # NET_VENDOR_WIZNET
diff --git a/drivers/net/ethernet/wiznet/Makefile b/drivers/net/ethernet/wiznet/Makefile
index c614535227e8..1e05e1a84208 100644
--- a/drivers/net/ethernet/wiznet/Makefile
+++ b/drivers/net/ethernet/wiznet/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_WIZNET_W5100) += w5100.o
+obj-$(CONFIG_WIZNET_W5100_SPI) += w5100-spi.o
obj-$(CONFIG_WIZNET_W5300) += w5300.o
diff --git a/drivers/net/ethernet/wiznet/w5100-spi.c b/drivers/net/ethernet/wiznet/w5100-spi.c
new file mode 100644
index 000000000000..93a2d3c07303
--- /dev/null
+++ b/drivers/net/ethernet/wiznet/w5100-spi.c
@@ -0,0 +1,466 @@
+/*
+ * Ethernet driver for the WIZnet W5100/W5200/W5500 chip.
+ *
+ * Copyright (C) 2016 Akinobu Mita <akinobu.mita@gmail.com>
+ *
+ * Licensed under the GPL-2 or later.
+ *
+ * Datasheet:
+ * http://www.wiznet.co.kr/wp-content/uploads/wiznethome/Chip/W5100/Document/W5100_Datasheet_v1.2.6.pdf
+ * http://wiznethome.cafe24.com/wp-content/uploads/wiznethome/Chip/W5200/Documents/W5200_DS_V140E.pdf
+ * http://wizwiki.net/wiki/lib/exe/fetch.php?media=products:w5500:w5500_ds_v106e_141230.pdf
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/of_net.h>
+#include <linux/spi/spi.h>
+
+#include "w5100.h"
+
+#define W5100_SPI_WRITE_OPCODE 0xf0
+#define W5100_SPI_READ_OPCODE 0x0f
+
+static int w5100_spi_read(struct net_device *ndev, u32 addr)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ u8 cmd[3] = { W5100_SPI_READ_OPCODE, addr >> 8, addr & 0xff };
+ u8 data;
+ int ret;
+
+ ret = spi_write_then_read(spi, cmd, sizeof(cmd), &data, 1);
+
+ return ret ? ret : data;
+}
+
+static int w5100_spi_write(struct net_device *ndev, u32 addr, u8 data)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ u8 cmd[4] = { W5100_SPI_WRITE_OPCODE, addr >> 8, addr & 0xff, data};
+
+ return spi_write_then_read(spi, cmd, sizeof(cmd), NULL, 0);
+}
+
+static int w5100_spi_read16(struct net_device *ndev, u32 addr)
+{
+ u16 data;
+ int ret;
+
+ ret = w5100_spi_read(ndev, addr);
+ if (ret < 0)
+ return ret;
+ data = ret << 8;
+ ret = w5100_spi_read(ndev, addr + 1);
+
+ return ret < 0 ? ret : data | ret;
+}
+
+static int w5100_spi_write16(struct net_device *ndev, u32 addr, u16 data)
+{
+ int ret;
+
+ ret = w5100_spi_write(ndev, addr, data >> 8);
+ if (ret)
+ return ret;
+
+ return w5100_spi_write(ndev, addr + 1, data & 0xff);
+}
+
+static int w5100_spi_readbulk(struct net_device *ndev, u32 addr, u8 *buf,
+ int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ int ret = w5100_spi_read(ndev, addr + i);
+
+ if (ret < 0)
+ return ret;
+ buf[i] = ret;
+ }
+
+ return 0;
+}
+
+static int w5100_spi_writebulk(struct net_device *ndev, u32 addr, const u8 *buf,
+ int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ int ret = w5100_spi_write(ndev, addr + i, buf[i]);
+
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct w5100_ops w5100_spi_ops = {
+ .may_sleep = true,
+ .chip_id = W5100,
+ .read = w5100_spi_read,
+ .write = w5100_spi_write,
+ .read16 = w5100_spi_read16,
+ .write16 = w5100_spi_write16,
+ .readbulk = w5100_spi_readbulk,
+ .writebulk = w5100_spi_writebulk,
+};
+
+#define W5200_SPI_WRITE_OPCODE 0x80
+
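+/* The W5200 frame is a 4-byte header (a 16-bit address, then a 16-bit
+ * field whose top bit selects write and whose low 15 bits carry the
+ * data length) followed by the data itself, so bulk transfers go out
+ * as a single two-part SPI message.
+ */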
+struct w5200_spi_priv {
+ /* Serialize access to cmd_buf */
+ struct mutex cmd_lock;
+
+ /* DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ u8 cmd_buf[4] ____cacheline_aligned;
+};
+
+static struct w5200_spi_priv *w5200_spi_priv(struct net_device *ndev)
+{
+ return w5100_ops_priv(ndev);
+}
+
+static int w5200_spi_init(struct net_device *ndev)
+{
+ struct w5200_spi_priv *spi_priv = w5200_spi_priv(ndev);
+
+ mutex_init(&spi_priv->cmd_lock);
+
+ return 0;
+}
+
+static int w5200_spi_read(struct net_device *ndev, u32 addr)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ u8 cmd[4] = { addr >> 8, addr & 0xff, 0, 1 };
+ u8 data;
+ int ret;
+
+ ret = spi_write_then_read(spi, cmd, sizeof(cmd), &data, 1);
+
+ return ret ? ret : data;
+}
+
+static int w5200_spi_write(struct net_device *ndev, u32 addr, u8 data)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ u8 cmd[5] = { addr >> 8, addr & 0xff, W5200_SPI_WRITE_OPCODE, 1, data };
+
+ return spi_write_then_read(spi, cmd, sizeof(cmd), NULL, 0);
+}
+
+static int w5200_spi_read16(struct net_device *ndev, u32 addr)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ u8 cmd[4] = { addr >> 8, addr & 0xff, 0, 2 };
+ __be16 data;
+ int ret;
+
+ ret = spi_write_then_read(spi, cmd, sizeof(cmd), &data, sizeof(data));
+
+ return ret ? ret : be16_to_cpu(data);
+}
+
+static int w5200_spi_write16(struct net_device *ndev, u32 addr, u16 data)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ u8 cmd[6] = {
+ addr >> 8, addr & 0xff,
+ W5200_SPI_WRITE_OPCODE, 2,
+ data >> 8, data & 0xff
+ };
+
+ return spi_write_then_read(spi, cmd, sizeof(cmd), NULL, 0);
+}
+
+static int w5200_spi_readbulk(struct net_device *ndev, u32 addr, u8 *buf,
+ int len)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ struct w5200_spi_priv *spi_priv = w5200_spi_priv(ndev);
+ struct spi_transfer xfer[] = {
+ {
+ .tx_buf = spi_priv->cmd_buf,
+ .len = sizeof(spi_priv->cmd_buf),
+ },
+ {
+ .rx_buf = buf,
+ .len = len,
+ },
+ };
+ int ret;
+
+ mutex_lock(&spi_priv->cmd_lock);
+
+ spi_priv->cmd_buf[0] = addr >> 8;
+ spi_priv->cmd_buf[1] = addr;
+ spi_priv->cmd_buf[2] = len >> 8;
+ spi_priv->cmd_buf[3] = len;
+ ret = spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
+
+ mutex_unlock(&spi_priv->cmd_lock);
+
+ return ret;
+}
+
+static int w5200_spi_writebulk(struct net_device *ndev, u32 addr, const u8 *buf,
+ int len)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ struct w5200_spi_priv *spi_priv = w5200_spi_priv(ndev);
+ struct spi_transfer xfer[] = {
+ {
+ .tx_buf = spi_priv->cmd_buf,
+ .len = sizeof(spi_priv->cmd_buf),
+ },
+ {
+ .tx_buf = buf,
+ .len = len,
+ },
+ };
+ int ret;
+
+ mutex_lock(&spi_priv->cmd_lock);
+
+ spi_priv->cmd_buf[0] = addr >> 8;
+ spi_priv->cmd_buf[1] = addr;
+ spi_priv->cmd_buf[2] = W5200_SPI_WRITE_OPCODE | (len >> 8);
+ spi_priv->cmd_buf[3] = len;
+ ret = spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
+
+ mutex_unlock(&spi_priv->cmd_lock);
+
+ return ret;
+}
+
+static const struct w5100_ops w5200_ops = {
+ .may_sleep = true,
+ .chip_id = W5200,
+ .read = w5200_spi_read,
+ .write = w5200_spi_write,
+ .read16 = w5200_spi_read16,
+ .write16 = w5200_spi_write16,
+ .readbulk = w5200_spi_readbulk,
+ .writebulk = w5200_spi_writebulk,
+ .init = w5200_spi_init,
+};
+
+#define W5500_SPI_BLOCK_SELECT(addr) (((addr) >> 16) & 0x1f)
+#define W5500_SPI_READ_CONTROL(addr) (W5500_SPI_BLOCK_SELECT(addr) << 3)
+#define W5500_SPI_WRITE_CONTROL(addr) \
+ ((W5500_SPI_BLOCK_SELECT(addr) << 3) | BIT(2))
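+/* Worked example: socket 0 registers live at 0x10000, so the block
+ * select is 1, the read control byte is 0x08 and the write control
+ * byte is 0x0c (block select << 3, with BIT(2) set for writes).
+ */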
+
+struct w5500_spi_priv {
+ /* Serialize access to cmd_buf */
+ struct mutex cmd_lock;
+
+ /* DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ u8 cmd_buf[3] ____cacheline_aligned;
+};
+
+static struct w5500_spi_priv *w5500_spi_priv(struct net_device *ndev)
+{
+ return w5100_ops_priv(ndev);
+}
+
+static int w5500_spi_init(struct net_device *ndev)
+{
+ struct w5500_spi_priv *spi_priv = w5500_spi_priv(ndev);
+
+ mutex_init(&spi_priv->cmd_lock);
+
+ return 0;
+}
+
+static int w5500_spi_read(struct net_device *ndev, u32 addr)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ u8 cmd[3] = {
+ addr >> 8,
+ addr,
+ W5500_SPI_READ_CONTROL(addr)
+ };
+ u8 data;
+ int ret;
+
+ ret = spi_write_then_read(spi, cmd, sizeof(cmd), &data, 1);
+
+ return ret ? ret : data;
+}
+
+static int w5500_spi_write(struct net_device *ndev, u32 addr, u8 data)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ u8 cmd[4] = {
+ addr >> 8,
+ addr,
+ W5500_SPI_WRITE_CONTROL(addr),
+ data
+ };
+
+ return spi_write_then_read(spi, cmd, sizeof(cmd), NULL, 0);
+}
+
+static int w5500_spi_read16(struct net_device *ndev, u32 addr)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ u8 cmd[3] = {
+ addr >> 8,
+ addr,
+ W5500_SPI_READ_CONTROL(addr)
+ };
+ __be16 data;
+ int ret;
+
+ ret = spi_write_then_read(spi, cmd, sizeof(cmd), &data, sizeof(data));
+
+ return ret ? ret : be16_to_cpu(data);
+}
+
+static int w5500_spi_write16(struct net_device *ndev, u32 addr, u16 data)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ u8 cmd[5] = {
+ addr >> 8,
+ addr,
+ W5500_SPI_WRITE_CONTROL(addr),
+ data >> 8,
+ data
+ };
+
+ return spi_write_then_read(spi, cmd, sizeof(cmd), NULL, 0);
+}
+
+static int w5500_spi_readbulk(struct net_device *ndev, u32 addr, u8 *buf,
+ int len)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ struct w5500_spi_priv *spi_priv = w5500_spi_priv(ndev);
+ struct spi_transfer xfer[] = {
+ {
+ .tx_buf = spi_priv->cmd_buf,
+ .len = sizeof(spi_priv->cmd_buf),
+ },
+ {
+ .rx_buf = buf,
+ .len = len,
+ },
+ };
+ int ret;
+
+ mutex_lock(&spi_priv->cmd_lock);
+
+ spi_priv->cmd_buf[0] = addr >> 8;
+ spi_priv->cmd_buf[1] = addr;
+ spi_priv->cmd_buf[2] = W5500_SPI_READ_CONTROL(addr);
+ ret = spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
+
+ mutex_unlock(&spi_priv->cmd_lock);
+
+ return ret;
+}
+
+static int w5500_spi_writebulk(struct net_device *ndev, u32 addr, const u8 *buf,
+ int len)
+{
+ struct spi_device *spi = to_spi_device(ndev->dev.parent);
+ struct w5500_spi_priv *spi_priv = w5500_spi_priv(ndev);
+ struct spi_transfer xfer[] = {
+ {
+ .tx_buf = spi_priv->cmd_buf,
+ .len = sizeof(spi_priv->cmd_buf),
+ },
+ {
+ .tx_buf = buf,
+ .len = len,
+ },
+ };
+ int ret;
+
+ mutex_lock(&spi_priv->cmd_lock);
+
+ spi_priv->cmd_buf[0] = addr >> 8;
+ spi_priv->cmd_buf[1] = addr;
+ spi_priv->cmd_buf[2] = W5500_SPI_WRITE_CONTROL(addr);
+ ret = spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
+
+ mutex_unlock(&spi_priv->cmd_lock);
+
+ return ret;
+}
+
+static const struct w5100_ops w5500_ops = {
+ .may_sleep = true,
+ .chip_id = W5500,
+ .read = w5500_spi_read,
+ .write = w5500_spi_write,
+ .read16 = w5500_spi_read16,
+ .write16 = w5500_spi_write16,
+ .readbulk = w5500_spi_readbulk,
+ .writebulk = w5500_spi_writebulk,
+ .init = w5500_spi_init,
+};
+
+static int w5100_spi_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ const struct w5100_ops *ops;
+ int priv_size;
+ const void *mac = of_get_mac_address(spi->dev.of_node);
+
+ switch (id->driver_data) {
+ case W5100:
+ ops = &w5100_spi_ops;
+ priv_size = 0;
+ break;
+ case W5200:
+ ops = &w5200_ops;
+ priv_size = sizeof(struct w5200_spi_priv);
+ break;
+ case W5500:
+ ops = &w5500_ops;
+ priv_size = sizeof(struct w5500_spi_priv);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return w5100_probe(&spi->dev, ops, priv_size, mac, spi->irq, -EINVAL);
+}
+
+static int w5100_spi_remove(struct spi_device *spi)
+{
+ return w5100_remove(&spi->dev);
+}
+
+static const struct spi_device_id w5100_spi_ids[] = {
+ { "w5100", W5100 },
+ { "w5200", W5200 },
+ { "w5500", W5500 },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, w5100_spi_ids);
+
+static struct spi_driver w5100_spi_driver = {
+ .driver = {
+ .name = "w5100",
+ .pm = &w5100_pm_ops,
+ },
+ .probe = w5100_spi_probe,
+ .remove = w5100_spi_remove,
+ .id_table = w5100_spi_ids,
+};
+module_spi_driver(w5100_spi_driver);
+
+MODULE_DESCRIPTION("WIZnet W5100/W5200/W5500 Ethernet driver for SPI mode");
+MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>");
+MODULE_LICENSE("GPL");
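
Nothing above registers an of_match_table, so the driver binds by SPI
modalias alone ("w5100", "w5200" or "w5500", from w5100_spi_ids[]). On a
non-DT board a matching spi_board_info is therefore all that is needed;
a minimal sketch, with the bus number, chip select, clock rate and
interrupt GPIO all hypothetical:

	#include <linux/gpio.h>
	#include <linux/spi/spi.h>

	#define W5500_INT_GPIO	42	/* hypothetical interrupt line */

	static struct spi_board_info w5500_board_info __initdata = {
		.modalias	= "w5500",	/* matches w5100_spi_ids[] */
		.max_speed_hz	= 26000000,	/* hypothetical; per board wiring */
		.bus_num	= 0,		/* hypothetical SPI controller */
		.chip_select	= 0,
		.mode		= SPI_MODE_0,	/* the mode named in the Kconfig help */
	};

	static int __init board_register_w5500(void)
	{
		w5500_board_info.irq = gpio_to_irq(W5500_INT_GPIO);
		return spi_register_board_info(&w5500_board_info, 1);
	}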
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 8b282d0b169c..37ab46cdbec4 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -27,6 +27,8 @@
#include <linux/irq.h>
#include <linux/gpio.h>
+#include "w5100.h"
+
#define DRV_NAME "w5100"
#define DRV_VERSION "2012-04-04"
@@ -36,7 +38,7 @@ MODULE_ALIAS("platform:"DRV_NAME);
MODULE_LICENSE("GPL");
/*
- * Registers
+ * W5100/W5200/W5500 common registers
*/
#define W5100_COMMON_REGS 0x0000
#define W5100_MR 0x0000 /* Mode Register */
@@ -46,55 +48,115 @@ MODULE_LICENSE("GPL");
#define MR_IND 0x01 /* Indirect mode */
#define W5100_SHAR 0x0009 /* Source MAC address */
#define W5100_IR 0x0015 /* Interrupt Register */
-#define W5100_IMR 0x0016 /* Interrupt Mask Register */
-#define IR_S0 0x01 /* S0 interrupt */
-#define W5100_RTR 0x0017 /* Retry Time-value Register */
-#define RTR_DEFAULT 2000 /* =0x07d0 (2000) */
-#define W5100_RMSR 0x001a /* Receive Memory Size */
-#define W5100_TMSR 0x001b /* Transmit Memory Size */
#define W5100_COMMON_REGS_LEN 0x0040
-#define W5100_S0_REGS 0x0400
-#define W5100_S0_MR 0x0400 /* S0 Mode Register */
-#define S0_MR_MACRAW 0x04 /* MAC RAW mode (promiscuous) */
-#define S0_MR_MACRAW_MF 0x44 /* MAC RAW mode (filtered) */
-#define W5100_S0_CR 0x0401 /* S0 Command Register */
+#define W5100_Sn_MR 0x0000 /* Sn Mode Register */
+#define W5100_Sn_CR 0x0001 /* Sn Command Register */
+#define W5100_Sn_IR 0x0002 /* Sn Interrupt Register */
+#define W5100_Sn_SR 0x0003 /* Sn Status Register */
+#define W5100_Sn_TX_FSR 0x0020 /* Sn Transmit free memory size */
+#define W5100_Sn_TX_RD 0x0022 /* Sn Transmit memory read pointer */
+#define W5100_Sn_TX_WR 0x0024 /* Sn Transmit memory write pointer */
+#define W5100_Sn_RX_RSR 0x0026 /* Sn Receive free memory size */
+#define W5100_Sn_RX_RD 0x0028 /* Sn Receive memory read pointer */
+
+#define S0_REGS(priv) ((priv)->s0_regs)
+
+#define W5100_S0_MR(priv) (S0_REGS(priv) + W5100_Sn_MR)
+#define S0_MR_MACRAW 0x04 /* MAC RAW mode */
+#define S0_MR_MF 0x40 /* MAC Filter for W5100 and W5200 */
+#define W5500_S0_MR_MF 0x80 /* MAC Filter for W5500 */
+#define W5100_S0_CR(priv) (S0_REGS(priv) + W5100_Sn_CR)
#define S0_CR_OPEN 0x01 /* OPEN command */
#define S0_CR_CLOSE 0x10 /* CLOSE command */
#define S0_CR_SEND 0x20 /* SEND command */
#define S0_CR_RECV 0x40 /* RECV command */
-#define W5100_S0_IR 0x0402 /* S0 Interrupt Register */
+#define W5100_S0_IR(priv) (S0_REGS(priv) + W5100_Sn_IR)
#define S0_IR_SENDOK 0x10 /* complete sending */
#define S0_IR_RECV 0x04 /* receiving data */
-#define W5100_S0_SR 0x0403 /* S0 Status Register */
+#define W5100_S0_SR(priv) (S0_REGS(priv) + W5100_Sn_SR)
#define S0_SR_MACRAW 0x42 /* mac raw mode */
-#define W5100_S0_TX_FSR 0x0420 /* S0 Transmit free memory size */
-#define W5100_S0_TX_RD 0x0422 /* S0 Transmit memory read pointer */
-#define W5100_S0_TX_WR 0x0424 /* S0 Transmit memory write pointer */
-#define W5100_S0_RX_RSR 0x0426 /* S0 Receive free memory size */
-#define W5100_S0_RX_RD 0x0428 /* S0 Receive memory read pointer */
+#define W5100_S0_TX_FSR(priv) (S0_REGS(priv) + W5100_Sn_TX_FSR)
+#define W5100_S0_TX_RD(priv) (S0_REGS(priv) + W5100_Sn_TX_RD)
+#define W5100_S0_TX_WR(priv) (S0_REGS(priv) + W5100_Sn_TX_WR)
+#define W5100_S0_RX_RSR(priv) (S0_REGS(priv) + W5100_Sn_RX_RSR)
+#define W5100_S0_RX_RD(priv) (S0_REGS(priv) + W5100_Sn_RX_RD)
+
#define W5100_S0_REGS_LEN 0x0040
+/*
+ * W5100 and W5200 common registers
+ */
+#define W5100_IMR 0x0016 /* Interrupt Mask Register */
+#define IR_S0 0x01 /* S0 interrupt */
+#define W5100_RTR 0x0017 /* Retry Time-value Register */
+#define RTR_DEFAULT 2000 /* =0x07d0 (2000) */
+
+/*
+ * W5100 specific register and memory
+ */
+#define W5100_RMSR 0x001a /* Receive Memory Size */
+#define W5100_TMSR 0x001b /* Transmit Memory Size */
+
+#define W5100_S0_REGS 0x0400
+
#define W5100_TX_MEM_START 0x4000
-#define W5100_TX_MEM_END 0x5fff
-#define W5100_TX_MEM_MASK 0x1fff
+#define W5100_TX_MEM_SIZE 0x2000
#define W5100_RX_MEM_START 0x6000
-#define W5100_RX_MEM_END 0x7fff
-#define W5100_RX_MEM_MASK 0x1fff
+#define W5100_RX_MEM_SIZE 0x2000
+
+/*
+ * W5200 specific register and memory
+ */
+#define W5200_S0_REGS 0x4000
+
+#define W5200_Sn_RXMEM_SIZE(n) (0x401e + (n) * 0x0100) /* Sn RX Memory Size */
+#define W5200_Sn_TXMEM_SIZE(n) (0x401f + (n) * 0x0100) /* Sn TX Memory Size */
+
+#define W5200_TX_MEM_START 0x8000
+#define W5200_TX_MEM_SIZE 0x4000
+#define W5200_RX_MEM_START 0xc000
+#define W5200_RX_MEM_SIZE 0x4000
+
+/*
+ * W5500 specific register and memory
+ *
+ * The W5500 registers and memory are organized into multiple blocks.
+ * Each block is selected by a 16-bit offset address plus 5 block
+ * select bits, so we encode both into a single 32-bit address: the
+ * lower 16 bits are the offset address, the upper 16 bits the block
+ * select bits.
+ */
+#define W5500_SIMR 0x0018 /* Socket Interrupt Mask Register */
+#define W5500_RTR 0x0019 /* Retry Time-value Register */
+
+#define W5500_S0_REGS 0x10000
+
+#define W5500_Sn_RXMEM_SIZE(n) \
+ (0x1001e + (n) * 0x40000) /* Sn RX Memory Size */
+#define W5500_Sn_TXMEM_SIZE(n) \
+ (0x1001f + (n) * 0x40000) /* Sn TX Memory Size */
+
+#define W5500_TX_MEM_START 0x20000
+#define W5500_TX_MEM_SIZE 0x04000
+#define W5500_RX_MEM_START 0x30000
+#define W5500_RX_MEM_SIZE 0x04000
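+
+/* Decoding example: W5500_Sn_RXMEM_SIZE(1) = 0x1001e + 0x40000 =
+ * 0x5001e, i.e. block select 5 (socket 1's register block) with
+ * offset 0x001e inside it.
+ */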
/*
* Device driver private data structure
*/
+
struct w5100_priv {
- void __iomem *base;
- spinlock_t reg_lock;
- bool indirect;
- u8 (*read)(struct w5100_priv *priv, u16 addr);
- void (*write)(struct w5100_priv *priv, u16 addr, u8 data);
- u16 (*read16)(struct w5100_priv *priv, u16 addr);
- void (*write16)(struct w5100_priv *priv, u16 addr, u16 data);
- void (*readbuf)(struct w5100_priv *priv, u16 addr, u8 *buf, int len);
- void (*writebuf)(struct w5100_priv *priv, u16 addr, u8 *buf, int len);
+ const struct w5100_ops *ops;
+
+ /* Socket 0 register offset address */
+ u32 s0_regs;
+ /* Socket 0 TX buffer offset address and size */
+ u32 s0_tx_buf;
+ u16 s0_tx_buf_size;
+ /* Socket 0 RX buffer offset address and size */
+ u32 s0_rx_buf;
+ u16 s0_rx_buf_size;
+
int irq;
int link_irq;
int link_gpio;
@@ -103,6 +165,13 @@ struct w5100_priv {
struct net_device *ndev;
bool promisc;
u32 msg_enable;
+
+ struct workqueue_struct *xfer_wq;
+ struct work_struct rx_work;
+ struct sk_buff *tx_skb;
+ struct work_struct tx_work;
+ struct work_struct setrx_work;
+ struct work_struct restart_work;
};
/************************************************************************
@@ -111,63 +180,122 @@ struct w5100_priv {
*
***********************************************************************/
+struct w5100_mmio_priv {
+ void __iomem *base;
+ /* Serialize access in indirect address mode */
+ spinlock_t reg_lock;
+};
+
+static inline struct w5100_mmio_priv *w5100_mmio_priv(struct net_device *dev)
+{
+ return w5100_ops_priv(dev);
+}
+
+static inline void __iomem *w5100_mmio(struct net_device *ndev)
+{
+ struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
+
+ return mmio_priv->base;
+}
+
/*
* In direct address mode host system can directly access W5100 registers
* after mapping to Memory-Mapped I/O space.
*
* 0x8000 bytes are required for memory space.
*/
-static inline u8 w5100_read_direct(struct w5100_priv *priv, u16 addr)
+static inline int w5100_read_direct(struct net_device *ndev, u32 addr)
{
- return ioread8(priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT));
+ return ioread8(w5100_mmio(ndev) + (addr << CONFIG_WIZNET_BUS_SHIFT));
}
-static inline void w5100_write_direct(struct w5100_priv *priv,
- u16 addr, u8 data)
+static inline int __w5100_write_direct(struct net_device *ndev, u32 addr,
+ u8 data)
{
- iowrite8(data, priv->base + (addr << CONFIG_WIZNET_BUS_SHIFT));
+ iowrite8(data, w5100_mmio(ndev) + (addr << CONFIG_WIZNET_BUS_SHIFT));
+
+ return 0;
}
-static u16 w5100_read16_direct(struct w5100_priv *priv, u16 addr)
+static inline int w5100_write_direct(struct net_device *ndev, u32 addr, u8 data)
+{
+ __w5100_write_direct(ndev, addr, data);
+ mmiowb();
+
+ return 0;
+}
+
+static int w5100_read16_direct(struct net_device *ndev, u32 addr)
{
u16 data;
- data = w5100_read_direct(priv, addr) << 8;
- data |= w5100_read_direct(priv, addr + 1);
+ data = w5100_read_direct(ndev, addr) << 8;
+ data |= w5100_read_direct(ndev, addr + 1);
return data;
}
-static void w5100_write16_direct(struct w5100_priv *priv, u16 addr, u16 data)
+static int w5100_write16_direct(struct net_device *ndev, u32 addr, u16 data)
{
- w5100_write_direct(priv, addr, data >> 8);
- w5100_write_direct(priv, addr + 1, data);
+ __w5100_write_direct(ndev, addr, data >> 8);
+ __w5100_write_direct(ndev, addr + 1, data);
+ mmiowb();
+
+ return 0;
}
-static void w5100_readbuf_direct(struct w5100_priv *priv,
- u16 offset, u8 *buf, int len)
+static int w5100_readbulk_direct(struct net_device *ndev, u32 addr, u8 *buf,
+ int len)
{
- u16 addr = W5100_RX_MEM_START + (offset & W5100_RX_MEM_MASK);
int i;
- for (i = 0; i < len; i++, addr++) {
- if (unlikely(addr > W5100_RX_MEM_END))
- addr = W5100_RX_MEM_START;
- *buf++ = w5100_read_direct(priv, addr);
- }
+ for (i = 0; i < len; i++, addr++)
+ *buf++ = w5100_read_direct(ndev, addr);
+
+ return 0;
}
-static void w5100_writebuf_direct(struct w5100_priv *priv,
- u16 offset, u8 *buf, int len)
+static int w5100_writebulk_direct(struct net_device *ndev, u32 addr,
+ const u8 *buf, int len)
{
- u16 addr = W5100_TX_MEM_START + (offset & W5100_TX_MEM_MASK);
int i;
- for (i = 0; i < len; i++, addr++) {
- if (unlikely(addr > W5100_TX_MEM_END))
- addr = W5100_TX_MEM_START;
- w5100_write_direct(priv, addr, *buf++);
- }
+ for (i = 0; i < len; i++, addr++)
+ __w5100_write_direct(ndev, addr, *buf++);
+
+ mmiowb();
+
+ return 0;
}
+static int w5100_mmio_init(struct net_device *ndev)
+{
+ struct platform_device *pdev = to_platform_device(ndev->dev.parent);
+ struct w5100_priv *priv = netdev_priv(ndev);
+ struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
+ struct resource *mem;
+
+ spin_lock_init(&mmio_priv->reg_lock);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mmio_priv->base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(mmio_priv->base))
+ return PTR_ERR(mmio_priv->base);
+
+ netdev_info(ndev, "at 0x%llx irq %d\n", (u64)mem->start, priv->irq);
+
+ return 0;
+}
+
+static const struct w5100_ops w5100_mmio_direct_ops = {
+ .chip_id = W5100,
+ .read = w5100_read_direct,
+ .write = w5100_write_direct,
+ .read16 = w5100_read16_direct,
+ .write16 = w5100_write16_direct,
+ .readbulk = w5100_readbulk_direct,
+ .writebulk = w5100_writebulk_direct,
+ .init = w5100_mmio_init,
+};
+
/*
* In indirect address mode host system indirectly accesses registers by
* using Indirect Mode Address Register (IDM_AR) and Indirect Mode Data
@@ -179,139 +307,290 @@ static void w5100_writebuf_direct(struct w5100_priv *priv,
#define W5100_IDM_AR 0x01 /* Indirect Mode Address Register */
#define W5100_IDM_DR 0x03 /* Indirect Mode Data Register */
-static u8 w5100_read_indirect(struct w5100_priv *priv, u16 addr)
+static int w5100_read_indirect(struct net_device *ndev, u32 addr)
{
+ struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
unsigned long flags;
u8 data;
- spin_lock_irqsave(&priv->reg_lock, flags);
- w5100_write16_direct(priv, W5100_IDM_AR, addr);
- mmiowb();
- data = w5100_read_direct(priv, W5100_IDM_DR);
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ spin_lock_irqsave(&mmio_priv->reg_lock, flags);
+ w5100_write16_direct(ndev, W5100_IDM_AR, addr);
+ data = w5100_read_direct(ndev, W5100_IDM_DR);
+ spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
return data;
}
-static void w5100_write_indirect(struct w5100_priv *priv, u16 addr, u8 data)
+static int w5100_write_indirect(struct net_device *ndev, u32 addr, u8 data)
{
+ struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
unsigned long flags;
- spin_lock_irqsave(&priv->reg_lock, flags);
- w5100_write16_direct(priv, W5100_IDM_AR, addr);
- mmiowb();
- w5100_write_direct(priv, W5100_IDM_DR, data);
- mmiowb();
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ spin_lock_irqsave(&mmio_priv->reg_lock, flags);
+ w5100_write16_direct(ndev, W5100_IDM_AR, addr);
+ w5100_write_direct(ndev, W5100_IDM_DR, data);
+ spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
+
+ return 0;
}
-static u16 w5100_read16_indirect(struct w5100_priv *priv, u16 addr)
+static int w5100_read16_indirect(struct net_device *ndev, u32 addr)
{
+ struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
unsigned long flags;
u16 data;
- spin_lock_irqsave(&priv->reg_lock, flags);
- w5100_write16_direct(priv, W5100_IDM_AR, addr);
- mmiowb();
- data = w5100_read_direct(priv, W5100_IDM_DR) << 8;
- data |= w5100_read_direct(priv, W5100_IDM_DR);
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ spin_lock_irqsave(&mmio_priv->reg_lock, flags);
+ w5100_write16_direct(ndev, W5100_IDM_AR, addr);
+ data = w5100_read_direct(ndev, W5100_IDM_DR) << 8;
+ data |= w5100_read_direct(ndev, W5100_IDM_DR);
+ spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
return data;
}
-static void w5100_write16_indirect(struct w5100_priv *priv, u16 addr, u16 data)
+static int w5100_write16_indirect(struct net_device *ndev, u32 addr, u16 data)
{
+ struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
unsigned long flags;
- spin_lock_irqsave(&priv->reg_lock, flags);
- w5100_write16_direct(priv, W5100_IDM_AR, addr);
- mmiowb();
- w5100_write_direct(priv, W5100_IDM_DR, data >> 8);
- w5100_write_direct(priv, W5100_IDM_DR, data);
- mmiowb();
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ spin_lock_irqsave(&mmio_priv->reg_lock, flags);
+ w5100_write16_direct(ndev, W5100_IDM_AR, addr);
+ __w5100_write_direct(ndev, W5100_IDM_DR, data >> 8);
+ w5100_write_direct(ndev, W5100_IDM_DR, data);
+ spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
+
+ return 0;
}
-static void w5100_readbuf_indirect(struct w5100_priv *priv,
- u16 offset, u8 *buf, int len)
+static int w5100_readbulk_indirect(struct net_device *ndev, u32 addr, u8 *buf,
+ int len)
{
- u16 addr = W5100_RX_MEM_START + (offset & W5100_RX_MEM_MASK);
+ struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
unsigned long flags;
int i;
- spin_lock_irqsave(&priv->reg_lock, flags);
- w5100_write16_direct(priv, W5100_IDM_AR, addr);
- mmiowb();
+ spin_lock_irqsave(&mmio_priv->reg_lock, flags);
+ w5100_write16_direct(ndev, W5100_IDM_AR, addr);
+
+ for (i = 0; i < len; i++)
+ *buf++ = w5100_read_direct(ndev, W5100_IDM_DR);
- for (i = 0; i < len; i++, addr++) {
- if (unlikely(addr > W5100_RX_MEM_END)) {
- addr = W5100_RX_MEM_START;
- w5100_write16_direct(priv, W5100_IDM_AR, addr);
- mmiowb();
- }
- *buf++ = w5100_read_direct(priv, W5100_IDM_DR);
- }
mmiowb();
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
+
+ return 0;
}
-static void w5100_writebuf_indirect(struct w5100_priv *priv,
- u16 offset, u8 *buf, int len)
+static int w5100_writebulk_indirect(struct net_device *ndev, u32 addr,
+ const u8 *buf, int len)
{
- u16 addr = W5100_TX_MEM_START + (offset & W5100_TX_MEM_MASK);
+ struct w5100_mmio_priv *mmio_priv = w5100_mmio_priv(ndev);
unsigned long flags;
int i;
- spin_lock_irqsave(&priv->reg_lock, flags);
- w5100_write16_direct(priv, W5100_IDM_AR, addr);
- mmiowb();
+ spin_lock_irqsave(&mmio_priv->reg_lock, flags);
+ w5100_write16_direct(ndev, W5100_IDM_AR, addr);
+
+ for (i = 0; i < len; i++)
+ __w5100_write_direct(ndev, W5100_IDM_DR, *buf++);
- for (i = 0; i < len; i++, addr++) {
- if (unlikely(addr > W5100_TX_MEM_END)) {
- addr = W5100_TX_MEM_START;
- w5100_write16_direct(priv, W5100_IDM_AR, addr);
- mmiowb();
- }
- w5100_write_direct(priv, W5100_IDM_DR, *buf++);
- }
mmiowb();
- spin_unlock_irqrestore(&priv->reg_lock, flags);
+ spin_unlock_irqrestore(&mmio_priv->reg_lock, flags);
+
+ return 0;
}
+static int w5100_reset_indirect(struct net_device *ndev)
+{
+ w5100_write_direct(ndev, W5100_MR, MR_RST);
+ mdelay(5);
+ w5100_write_direct(ndev, W5100_MR, MR_PB | MR_AI | MR_IND);
+
+ return 0;
+}
+
+static const struct w5100_ops w5100_mmio_indirect_ops = {
+ .chip_id = W5100,
+ .read = w5100_read_indirect,
+ .write = w5100_write_indirect,
+ .read16 = w5100_read16_indirect,
+ .write16 = w5100_write16_indirect,
+ .readbulk = w5100_readbulk_indirect,
+ .writebulk = w5100_writebulk_indirect,
+ .init = w5100_mmio_init,
+ .reset = w5100_reset_indirect,
+};
+
#if defined(CONFIG_WIZNET_BUS_DIRECT)
-#define w5100_read w5100_read_direct
-#define w5100_write w5100_write_direct
-#define w5100_read16 w5100_read16_direct
-#define w5100_write16 w5100_write16_direct
-#define w5100_readbuf w5100_readbuf_direct
-#define w5100_writebuf w5100_writebuf_direct
+
+static int w5100_read(struct w5100_priv *priv, u32 addr)
+{
+ return w5100_read_direct(priv->ndev, addr);
+}
+
+static int w5100_write(struct w5100_priv *priv, u32 addr, u8 data)
+{
+ return w5100_write_direct(priv->ndev, addr, data);
+}
+
+static int w5100_read16(struct w5100_priv *priv, u32 addr)
+{
+ return w5100_read16_direct(priv->ndev, addr);
+}
+
+static int w5100_write16(struct w5100_priv *priv, u32 addr, u16 data)
+{
+ return w5100_write16_direct(priv->ndev, addr, data);
+}
+
+static int w5100_readbulk(struct w5100_priv *priv, u32 addr, u8 *buf, int len)
+{
+ return w5100_readbulk_direct(priv->ndev, addr, buf, len);
+}
+
+static int w5100_writebulk(struct w5100_priv *priv, u32 addr, const u8 *buf,
+ int len)
+{
+ return w5100_writebulk_direct(priv->ndev, addr, buf, len);
+}
#elif defined(CONFIG_WIZNET_BUS_INDIRECT)
-#define w5100_read w5100_read_indirect
-#define w5100_write w5100_write_indirect
-#define w5100_read16 w5100_read16_indirect
-#define w5100_write16 w5100_write16_indirect
-#define w5100_readbuf w5100_readbuf_indirect
-#define w5100_writebuf w5100_writebuf_indirect
+
+static int w5100_read(struct w5100_priv *priv, u32 addr)
+{
+ return w5100_read_indirect(priv->ndev, addr);
+}
+
+static int w5100_write(struct w5100_priv *priv, u32 addr, u8 data)
+{
+ return w5100_write_indirect(priv->ndev, addr, data);
+}
+
+static int w5100_read16(struct w5100_priv *priv, u32 addr)
+{
+ return w5100_read16_indirect(priv->ndev, addr);
+}
+
+static int w5100_write16(struct w5100_priv *priv, u32 addr, u16 data)
+{
+ return w5100_write16_indirect(priv->ndev, addr, data);
+}
+
+static int w5100_readbulk(struct w5100_priv *priv, u32 addr, u8 *buf, int len)
+{
+ return w5100_readbulk_indirect(priv->ndev, addr, buf, len);
+}
+
+static int w5100_writebulk(struct w5100_priv *priv, u32 addr, const u8 *buf,
+ int len)
+{
+ return w5100_writebulk_indirect(priv->ndev, addr, buf, len);
+}
#else /* CONFIG_WIZNET_BUS_ANY */
-#define w5100_read priv->read
-#define w5100_write priv->write
-#define w5100_read16 priv->read16
-#define w5100_write16 priv->write16
-#define w5100_readbuf priv->readbuf
-#define w5100_writebuf priv->writebuf
+
+static int w5100_read(struct w5100_priv *priv, u32 addr)
+{
+ return priv->ops->read(priv->ndev, addr);
+}
+
+static int w5100_write(struct w5100_priv *priv, u32 addr, u8 data)
+{
+ return priv->ops->write(priv->ndev, addr, data);
+}
+
+static int w5100_read16(struct w5100_priv *priv, u32 addr)
+{
+ return priv->ops->read16(priv->ndev, addr);
+}
+
+static int w5100_write16(struct w5100_priv *priv, u32 addr, u16 data)
+{
+ return priv->ops->write16(priv->ndev, addr, data);
+}
+
+static int w5100_readbulk(struct w5100_priv *priv, u32 addr, u8 *buf, int len)
+{
+ return priv->ops->readbulk(priv->ndev, addr, buf, len);
+}
+
+static int w5100_writebulk(struct w5100_priv *priv, u32 addr, const u8 *buf,
+ int len)
+{
+ return priv->ops->writebulk(priv->ndev, addr, buf, len);
+}
+
#endif
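+
+/* The socket TX/RX buffers are rings: when offset + len runs past the
+ * end, the access is split in two. E.g. with an 8KB ring (0x2000),
+ * offset 0x1f00 and len 0x200 become 0x100 bytes at 0x1f00 followed by
+ * the remaining 0x100 bytes wrapped back to the start of the ring.
+ */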
+static int w5100_readbuf(struct w5100_priv *priv, u16 offset, u8 *buf, int len)
+{
+ u32 addr;
+ int remain = 0;
+ int ret;
+ const u32 mem_start = priv->s0_rx_buf;
+ const u16 mem_size = priv->s0_rx_buf_size;
+
+ offset %= mem_size;
+ addr = mem_start + offset;
+
+ if (offset + len > mem_size) {
+ remain = (offset + len) % mem_size;
+ len = mem_size - offset;
+ }
+
+ ret = w5100_readbulk(priv, addr, buf, len);
+ if (ret || !remain)
+ return ret;
+
+ return w5100_readbulk(priv, mem_start, buf + len, remain);
+}
+
+static int w5100_writebuf(struct w5100_priv *priv, u16 offset, const u8 *buf,
+ int len)
+{
+ u32 addr;
+ int ret;
+ int remain = 0;
+ const u32 mem_start = priv->s0_tx_buf;
+ const u16 mem_size = priv->s0_tx_buf_size;
+
+ offset %= mem_size;
+ addr = mem_start + offset;
+
+ if (offset + len > mem_size) {
+ remain = (offset + len) % mem_size;
+ len = mem_size - offset;
+ }
+
+ ret = w5100_writebulk(priv, addr, buf, len);
+ if (ret || !remain)
+ return ret;
+
+ return w5100_writebulk(priv, mem_start, buf + len, remain);
+}
+
+static int w5100_reset(struct w5100_priv *priv)
+{
+ if (priv->ops->reset)
+ return priv->ops->reset(priv->ndev);
+
+ w5100_write(priv, W5100_MR, MR_RST);
+ mdelay(5);
+ w5100_write(priv, W5100_MR, MR_PB);
+
+ return 0;
+}
+
static int w5100_command(struct w5100_priv *priv, u16 cmd)
{
- unsigned long timeout = jiffies + msecs_to_jiffies(100);
+ unsigned long timeout;
- w5100_write(priv, W5100_S0_CR, cmd);
- mmiowb();
+ w5100_write(priv, W5100_S0_CR(priv), cmd);
+
+ timeout = jiffies + msecs_to_jiffies(100);
- while (w5100_read(priv, W5100_S0_CR) != 0) {
+ while (w5100_read(priv, W5100_S0_CR(priv)) != 0) {
if (time_after(jiffies, timeout))
return -EIO;
cpu_relax();
@@ -323,47 +602,124 @@ static int w5100_command(struct w5100_priv *priv, u16 cmd)
static void w5100_write_macaddr(struct w5100_priv *priv)
{
struct net_device *ndev = priv->ndev;
- int i;
- for (i = 0; i < ETH_ALEN; i++)
- w5100_write(priv, W5100_SHAR + i, ndev->dev_addr[i]);
- mmiowb();
+ w5100_writebulk(priv, W5100_SHAR, ndev->dev_addr, ETH_ALEN);
}
-static void w5100_hw_reset(struct w5100_priv *priv)
+static void w5100_socket_intr_mask(struct w5100_priv *priv, u8 mask)
{
- w5100_write_direct(priv, W5100_MR, MR_RST);
- mmiowb();
- mdelay(5);
- w5100_write_direct(priv, W5100_MR, priv->indirect ?
- MR_PB | MR_AI | MR_IND :
- MR_PB);
- mmiowb();
- w5100_write(priv, W5100_IMR, 0);
- w5100_write_macaddr(priv);
+ u32 imr;
+
+ if (priv->ops->chip_id == W5500)
+ imr = W5500_SIMR;
+ else
+ imr = W5100_IMR;
+
+ w5100_write(priv, imr, mask);
+}
+static void w5100_enable_intr(struct w5100_priv *priv)
+{
+ w5100_socket_intr_mask(priv, IR_S0);
+}
+
+static void w5100_disable_intr(struct w5100_priv *priv)
+{
+ w5100_socket_intr_mask(priv, 0);
+}
+
+static void w5100_memory_configure(struct w5100_priv *priv)
+{
/* Configure 16K of internal memory
* as 8K RX buffer and 8K TX buffer
*/
w5100_write(priv, W5100_RMSR, 0x03);
w5100_write(priv, W5100_TMSR, 0x03);
- mmiowb();
+}
+
+static void w5200_memory_configure(struct w5100_priv *priv)
+{
+ int i;
+
+ /* Configure internal RX memory as 16K RX buffer and
+ * internal TX memory as 16K TX buffer
+ */
+ w5100_write(priv, W5200_Sn_RXMEM_SIZE(0), 0x10);
+ w5100_write(priv, W5200_Sn_TXMEM_SIZE(0), 0x10);
+
+ for (i = 1; i < 8; i++) {
+ w5100_write(priv, W5200_Sn_RXMEM_SIZE(i), 0);
+ w5100_write(priv, W5200_Sn_TXMEM_SIZE(i), 0);
+ }
+}
+
+static void w5500_memory_configure(struct w5100_priv *priv)
+{
+ int i;
+
+ /* Configure internal RX memory as 16K RX buffer and
+ * internal TX memory as 16K TX buffer
+ */
+ w5100_write(priv, W5500_Sn_RXMEM_SIZE(0), 0x10);
+ w5100_write(priv, W5500_Sn_TXMEM_SIZE(0), 0x10);
+
+ for (i = 1; i < 8; i++) {
+ w5100_write(priv, W5500_Sn_RXMEM_SIZE(i), 0);
+ w5100_write(priv, W5500_Sn_TXMEM_SIZE(i), 0);
+ }
+}
+
+static int w5100_hw_reset(struct w5100_priv *priv)
+{
+ u32 rtr;
+
+ w5100_reset(priv);
+
+ w5100_disable_intr(priv);
+ w5100_write_macaddr(priv);
+
+ switch (priv->ops->chip_id) {
+ case W5100:
+ w5100_memory_configure(priv);
+ rtr = W5100_RTR;
+ break;
+ case W5200:
+ w5200_memory_configure(priv);
+ rtr = W5100_RTR;
+ break;
+ case W5500:
+ w5500_memory_configure(priv);
+ rtr = W5500_RTR;
+ break;
+ default:
+ return -EINVAL;
+ }
+
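+ /* RTR resets to 0x07d0 on all three chips, so a mismatching
+ * readback doubles as a cheap presence check for the chip.
+ */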
+ if (w5100_read16(priv, rtr) != RTR_DEFAULT)
+ return -ENODEV;
+
+ return 0;
}
static void w5100_hw_start(struct w5100_priv *priv)
{
- w5100_write(priv, W5100_S0_MR, priv->promisc ?
- S0_MR_MACRAW : S0_MR_MACRAW_MF);
- mmiowb();
+ u8 mode = S0_MR_MACRAW;
+
+ if (!priv->promisc) {
+ if (priv->ops->chip_id == W5500)
+ mode |= W5500_S0_MR_MF;
+ else
+ mode |= S0_MR_MF;
+ }
+
+ w5100_write(priv, W5100_S0_MR(priv), mode);
w5100_command(priv, S0_CR_OPEN);
- w5100_write(priv, W5100_IMR, IR_S0);
- mmiowb();
+ w5100_enable_intr(priv);
}
static void w5100_hw_close(struct w5100_priv *priv)
{
- w5100_write(priv, W5100_IMR, 0);
- mmiowb();
+ w5100_disable_intr(priv);
w5100_command(priv, S0_CR_CLOSE);
}
@@ -412,20 +768,17 @@ static int w5100_get_regs_len(struct net_device *ndev)
}
static void w5100_get_regs(struct net_device *ndev,
- struct ethtool_regs *regs, void *_buf)
+ struct ethtool_regs *regs, void *buf)
{
struct w5100_priv *priv = netdev_priv(ndev);
- u8 *buf = _buf;
- u16 i;
regs->version = 1;
- for (i = 0; i < W5100_COMMON_REGS_LEN; i++)
- *buf++ = w5100_read(priv, W5100_COMMON_REGS + i);
- for (i = 0; i < W5100_S0_REGS_LEN; i++)
- *buf++ = w5100_read(priv, W5100_S0_REGS + i);
+ w5100_readbulk(priv, W5100_COMMON_REGS, buf, W5100_COMMON_REGS_LEN);
+ buf += W5100_COMMON_REGS_LEN;
+ w5100_readbulk(priv, S0_REGS(priv), buf, W5100_S0_REGS_LEN);
}
-static void w5100_tx_timeout(struct net_device *ndev)
+static void w5100_restart(struct net_device *ndev)
{
struct w5100_priv *priv = netdev_priv(ndev);
@@ -433,74 +786,138 @@ static void w5100_tx_timeout(struct net_device *ndev)
w5100_hw_reset(priv);
w5100_hw_start(priv);
ndev->stats.tx_errors++;
- ndev->trans_start = jiffies;
+ netif_trans_update(ndev);
netif_wake_queue(ndev);
}
-static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
+static void w5100_restart_work(struct work_struct *work)
+{
+ struct w5100_priv *priv = container_of(work, struct w5100_priv,
+ restart_work);
+
+ w5100_restart(priv->ndev);
+}
+
+static void w5100_tx_timeout(struct net_device *ndev)
{
struct w5100_priv *priv = netdev_priv(ndev);
- u16 offset;
- netif_stop_queue(ndev);
+ if (priv->ops->may_sleep)
+ schedule_work(&priv->restart_work);
+ else
+ w5100_restart(ndev);
+}
- offset = w5100_read16(priv, W5100_S0_TX_WR);
+static void w5100_tx_skb(struct net_device *ndev, struct sk_buff *skb)
+{
+ struct w5100_priv *priv = netdev_priv(ndev);
+ u16 offset;
+
+ offset = w5100_read16(priv, W5100_S0_TX_WR(priv));
w5100_writebuf(priv, offset, skb->data, skb->len);
- w5100_write16(priv, W5100_S0_TX_WR, offset + skb->len);
- mmiowb();
+ w5100_write16(priv, W5100_S0_TX_WR(priv), offset + skb->len);
ndev->stats.tx_bytes += skb->len;
ndev->stats.tx_packets++;
dev_kfree_skb(skb);
w5100_command(priv, S0_CR_SEND);
+}
+
+static void w5100_tx_work(struct work_struct *work)
+{
+ struct w5100_priv *priv = container_of(work, struct w5100_priv,
+ tx_work);
+ struct sk_buff *skb = priv->tx_skb;
+
+ priv->tx_skb = NULL;
+
+ if (WARN_ON(!skb))
+ return;
+ w5100_tx_skb(priv->ndev, skb);
+}
+
+static int w5100_start_tx(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct w5100_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+
+ if (priv->ops->may_sleep) {
+ WARN_ON(priv->tx_skb);
+ priv->tx_skb = skb;
+ queue_work(priv->xfer_wq, &priv->tx_work);
+ } else {
+ w5100_tx_skb(ndev, skb);
+ }
return NETDEV_TX_OK;
}
-static int w5100_napi_poll(struct napi_struct *napi, int budget)
+static struct sk_buff *w5100_rx_skb(struct net_device *ndev)
{
- struct w5100_priv *priv = container_of(napi, struct w5100_priv, napi);
- struct net_device *ndev = priv->ndev;
+ struct w5100_priv *priv = netdev_priv(ndev);
struct sk_buff *skb;
- int rx_count;
u16 rx_len;
u16 offset;
u8 header[2];
+ u16 rx_buf_len = w5100_read16(priv, W5100_S0_RX_RSR(priv));
- for (rx_count = 0; rx_count < budget; rx_count++) {
- u16 rx_buf_len = w5100_read16(priv, W5100_S0_RX_RSR);
- if (rx_buf_len == 0)
- break;
+ if (rx_buf_len == 0)
+ return NULL;
- offset = w5100_read16(priv, W5100_S0_RX_RD);
- w5100_readbuf(priv, offset, header, 2);
- rx_len = get_unaligned_be16(header) - 2;
-
- skb = netdev_alloc_skb_ip_align(ndev, rx_len);
- if (unlikely(!skb)) {
- w5100_write16(priv, W5100_S0_RX_RD,
- offset + rx_buf_len);
- w5100_command(priv, S0_CR_RECV);
- ndev->stats.rx_dropped++;
- return -ENOMEM;
- }
+ offset = w5100_read16(priv, W5100_S0_RX_RD(priv));
+ w5100_readbuf(priv, offset, header, 2);
+ rx_len = get_unaligned_be16(header) - 2;
- skb_put(skb, rx_len);
- w5100_readbuf(priv, offset + 2, skb->data, rx_len);
- w5100_write16(priv, W5100_S0_RX_RD, offset + 2 + rx_len);
- mmiowb();
+ skb = netdev_alloc_skb_ip_align(ndev, rx_len);
+ if (unlikely(!skb)) {
+ w5100_write16(priv, W5100_S0_RX_RD(priv), offset + rx_buf_len);
w5100_command(priv, S0_CR_RECV);
- skb->protocol = eth_type_trans(skb, ndev);
+ ndev->stats.rx_dropped++;
+ return NULL;
+ }
+
+ skb_put(skb, rx_len);
+ w5100_readbuf(priv, offset + 2, skb->data, rx_len);
+ w5100_write16(priv, W5100_S0_RX_RD(priv), offset + 2 + rx_len);
+ w5100_command(priv, S0_CR_RECV);
+ skb->protocol = eth_type_trans(skb, ndev);
+
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += rx_len;
+
+ return skb;
+}
+
+static void w5100_rx_work(struct work_struct *work)
+{
+ struct w5100_priv *priv = container_of(work, struct w5100_priv,
+ rx_work);
+ struct sk_buff *skb;
+
+ while ((skb = w5100_rx_skb(priv->ndev)))
+ netif_rx_ni(skb);
+
+ w5100_enable_intr(priv);
+}
+
+static int w5100_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct w5100_priv *priv = container_of(napi, struct w5100_priv, napi);
+ int rx_count;
+
+ for (rx_count = 0; rx_count < budget; rx_count++) {
+ struct sk_buff *skb = w5100_rx_skb(priv->ndev);
- netif_receive_skb(skb);
- ndev->stats.rx_packets++;
- ndev->stats.rx_bytes += rx_len;
+ if (skb)
+ netif_receive_skb(skb);
+ else
+ break;
}
if (rx_count < budget) {
napi_complete(napi);
- w5100_write(priv, W5100_IMR, IR_S0);
- mmiowb();
+ w5100_enable_intr(priv);
}
return rx_count;
@@ -511,11 +928,10 @@ static irqreturn_t w5100_interrupt(int irq, void *ndev_instance)
struct net_device *ndev = ndev_instance;
struct w5100_priv *priv = netdev_priv(ndev);
- int ir = w5100_read(priv, W5100_S0_IR);
+ int ir = w5100_read(priv, W5100_S0_IR(priv));
if (!ir)
return IRQ_NONE;
- w5100_write(priv, W5100_S0_IR, ir);
- mmiowb();
+ w5100_write(priv, W5100_S0_IR(priv), ir);
if (ir & S0_IR_SENDOK) {
netif_dbg(priv, tx_done, ndev, "tx done\n");
@@ -523,11 +939,12 @@ static irqreturn_t w5100_interrupt(int irq, void *ndev_instance)
}
if (ir & S0_IR_RECV) {
- if (napi_schedule_prep(&priv->napi)) {
- w5100_write(priv, W5100_IMR, 0);
- mmiowb();
+ w5100_disable_intr(priv);
+
+ if (priv->ops->may_sleep)
+ queue_work(priv->xfer_wq, &priv->rx_work);
+ else if (napi_schedule_prep(&priv->napi))
__napi_schedule(&priv->napi);
- }
}
return IRQ_HANDLED;
@@ -551,6 +968,14 @@ static irqreturn_t w5100_detect_link(int irq, void *ndev_instance)
return IRQ_HANDLED;
}
+static void w5100_setrx_work(struct work_struct *work)
+{
+ struct w5100_priv *priv = container_of(work, struct w5100_priv,
+ setrx_work);
+
+ w5100_hw_start(priv);
+}
+
static void w5100_set_rx_mode(struct net_device *ndev)
{
struct w5100_priv *priv = netdev_priv(ndev);
@@ -558,7 +983,11 @@ static void w5100_set_rx_mode(struct net_device *ndev)
if (priv->promisc != set_promisc) {
priv->promisc = set_promisc;
- w5100_hw_start(priv);
+
+ if (priv->ops->may_sleep)
+ schedule_work(&priv->setrx_work);
+ else
+ w5100_hw_start(priv);
}
}
@@ -620,95 +1049,100 @@ static const struct net_device_ops w5100_netdev_ops = {
.ndo_change_mtu = eth_change_mtu,
};
-static int w5100_hw_probe(struct platform_device *pdev)
+static int w5100_mmio_probe(struct platform_device *pdev)
{
struct wiznet_platform_data *data = dev_get_platdata(&pdev->dev);
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct w5100_priv *priv = netdev_priv(ndev);
- const char *name = netdev_name(ndev);
+ const void *mac_addr = NULL;
struct resource *mem;
- int mem_size;
+ const struct w5100_ops *ops;
int irq;
- int ret;
- if (data && is_valid_ether_addr(data->mac_addr)) {
- memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
- } else {
- eth_hw_addr_random(ndev);
- }
+ if (data && is_valid_ether_addr(data->mac_addr))
+ mac_addr = data->mac_addr;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, mem);
- if (IS_ERR(priv->base))
- return PTR_ERR(priv->base);
-
- mem_size = resource_size(mem);
-
- spin_lock_init(&priv->reg_lock);
- priv->indirect = mem_size < W5100_BUS_DIRECT_SIZE;
- if (priv->indirect) {
- priv->read = w5100_read_indirect;
- priv->write = w5100_write_indirect;
- priv->read16 = w5100_read16_indirect;
- priv->write16 = w5100_write16_indirect;
- priv->readbuf = w5100_readbuf_indirect;
- priv->writebuf = w5100_writebuf_indirect;
- } else {
- priv->read = w5100_read_direct;
- priv->write = w5100_write_direct;
- priv->read16 = w5100_read16_direct;
- priv->write16 = w5100_write16_direct;
- priv->readbuf = w5100_readbuf_direct;
- priv->writebuf = w5100_writebuf_direct;
- }
-
- w5100_hw_reset(priv);
- if (w5100_read16(priv, W5100_RTR) != RTR_DEFAULT)
- return -ENODEV;
+ if (resource_size(mem) < W5100_BUS_DIRECT_SIZE)
+ ops = &w5100_mmio_indirect_ops;
+ else
+ ops = &w5100_mmio_direct_ops;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
- ret = request_irq(irq, w5100_interrupt,
- IRQ_TYPE_LEVEL_LOW, name, ndev);
- if (ret < 0)
- return ret;
- priv->irq = irq;
- priv->link_gpio = data ? data->link_gpio : -EINVAL;
- if (gpio_is_valid(priv->link_gpio)) {
- char *link_name = devm_kzalloc(&pdev->dev, 16, GFP_KERNEL);
- if (!link_name)
- return -ENOMEM;
- snprintf(link_name, 16, "%s-link", name);
- priv->link_irq = gpio_to_irq(priv->link_gpio);
- if (request_any_context_irq(priv->link_irq, w5100_detect_link,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- link_name, priv->ndev) < 0)
- priv->link_gpio = -EINVAL;
- }
+ return w5100_probe(&pdev->dev, ops, sizeof(struct w5100_mmio_priv),
+ mac_addr, irq, data ? data->link_gpio : -EINVAL);
+}
- netdev_info(ndev, "at 0x%llx irq %d\n", (u64)mem->start, irq);
- return 0;
+static int w5100_mmio_remove(struct platform_device *pdev)
+{
+ return w5100_remove(&pdev->dev);
+}
+
+void *w5100_ops_priv(const struct net_device *ndev)
+{
+ return netdev_priv(ndev) +
+ ALIGN(sizeof(struct w5100_priv), NETDEV_ALIGN);
}
+EXPORT_SYMBOL_GPL(w5100_ops_priv);
-static int w5100_probe(struct platform_device *pdev)
+int w5100_probe(struct device *dev, const struct w5100_ops *ops,
+ int sizeof_ops_priv, const void *mac_addr, int irq,
+ int link_gpio)
{
struct w5100_priv *priv;
struct net_device *ndev;
int err;
+ size_t alloc_size;
- ndev = alloc_etherdev(sizeof(*priv));
+ alloc_size = sizeof(*priv);
+ if (sizeof_ops_priv) {
+ alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
+ alloc_size += sizeof_ops_priv;
+ }
+ alloc_size += NETDEV_ALIGN - 1;
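+ /* Defensive padding: guarantees the ALIGN()ed ops private
+ * offset computed by w5100_ops_priv() can never run past the
+ * end of the allocation.
+ */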
+
+ ndev = alloc_etherdev(alloc_size);
if (!ndev)
return -ENOMEM;
- SET_NETDEV_DEV(ndev, &pdev->dev);
- platform_set_drvdata(pdev, ndev);
+ SET_NETDEV_DEV(ndev, dev);
+ dev_set_drvdata(dev, ndev);
priv = netdev_priv(ndev);
+
+ switch (ops->chip_id) {
+ case W5100:
+ priv->s0_regs = W5100_S0_REGS;
+ priv->s0_tx_buf = W5100_TX_MEM_START;
+ priv->s0_tx_buf_size = W5100_TX_MEM_SIZE;
+ priv->s0_rx_buf = W5100_RX_MEM_START;
+ priv->s0_rx_buf_size = W5100_RX_MEM_SIZE;
+ break;
+ case W5200:
+ priv->s0_regs = W5200_S0_REGS;
+ priv->s0_tx_buf = W5200_TX_MEM_START;
+ priv->s0_tx_buf_size = W5200_TX_MEM_SIZE;
+ priv->s0_rx_buf = W5200_RX_MEM_START;
+ priv->s0_rx_buf_size = W5200_RX_MEM_SIZE;
+ break;
+ case W5500:
+ priv->s0_regs = W5500_S0_REGS;
+ priv->s0_tx_buf = W5500_TX_MEM_START;
+ priv->s0_tx_buf_size = W5500_TX_MEM_SIZE;
+ priv->s0_rx_buf = W5500_RX_MEM_START;
+ priv->s0_rx_buf_size = W5500_RX_MEM_SIZE;
+ break;
+ default:
+ err = -EINVAL;
+ goto err_register;
+ }
+
priv->ndev = ndev;
+ priv->ops = ops;
+ priv->irq = irq;
+ priv->link_gpio = link_gpio;
ndev->netdev_ops = &w5100_netdev_ops;
ndev->ethtool_ops = &w5100_ethtool_ops;
- ndev->watchdog_timeo = HZ;
netif_napi_add(ndev, &priv->napi, w5100_napi_poll, 16);
/* This chip doesn't support VLAN packets with normal MTU,
@@ -720,22 +1154,76 @@ static int w5100_probe(struct platform_device *pdev)
if (err < 0)
goto err_register;
- err = w5100_hw_probe(pdev);
- if (err < 0)
- goto err_hw_probe;
+ priv->xfer_wq = alloc_workqueue(netdev_name(ndev), WQ_MEM_RECLAIM, 0);
+ if (!priv->xfer_wq) {
+ err = -ENOMEM;
+ goto err_wq;
+ }
+
+ INIT_WORK(&priv->rx_work, w5100_rx_work);
+ INIT_WORK(&priv->tx_work, w5100_tx_work);
+ INIT_WORK(&priv->setrx_work, w5100_setrx_work);
+ INIT_WORK(&priv->restart_work, w5100_restart_work);
+
+ if (mac_addr)
+ memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
+ else
+ eth_hw_addr_random(ndev);
+
+ if (priv->ops->init) {
+ err = priv->ops->init(priv->ndev);
+ if (err)
+ goto err_hw;
+ }
+
+ err = w5100_hw_reset(priv);
+ if (err)
+ goto err_hw;
+
+ if (ops->may_sleep) {
+ err = request_threaded_irq(priv->irq, NULL, w5100_interrupt,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ netdev_name(ndev), ndev);
+ } else {
+ err = request_irq(priv->irq, w5100_interrupt,
+ IRQF_TRIGGER_LOW, netdev_name(ndev), ndev);
+ }
+ if (err)
+ goto err_hw;
+
+ if (gpio_is_valid(priv->link_gpio)) {
+ char *link_name = devm_kzalloc(dev, 16, GFP_KERNEL);
+
+ if (!link_name) {
+ err = -ENOMEM;
+ goto err_gpio;
+ }
+ snprintf(link_name, 16, "%s-link", netdev_name(ndev));
+ priv->link_irq = gpio_to_irq(priv->link_gpio);
+ if (request_any_context_irq(priv->link_irq, w5100_detect_link,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING,
+ link_name, priv->ndev) < 0)
+ priv->link_gpio = -EINVAL;
+ }
return 0;
-err_hw_probe:
+err_gpio:
+ free_irq(priv->irq, ndev);
+err_hw:
+ destroy_workqueue(priv->xfer_wq);
+err_wq:
unregister_netdev(ndev);
err_register:
free_netdev(ndev);
return err;
}
+EXPORT_SYMBOL_GPL(w5100_probe);
-static int w5100_remove(struct platform_device *pdev)
+int w5100_remove(struct device *dev)
{
- struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev);
struct w5100_priv *priv = netdev_priv(ndev);
w5100_hw_reset(priv);
@@ -743,16 +1231,20 @@ static int w5100_remove(struct platform_device *pdev)
if (gpio_is_valid(priv->link_gpio))
free_irq(priv->link_irq, ndev);
+ flush_work(&priv->setrx_work);
+ flush_work(&priv->restart_work);
+ destroy_workqueue(priv->xfer_wq);
+
unregister_netdev(ndev);
free_netdev(ndev);
return 0;
}
+EXPORT_SYMBOL_GPL(w5100_remove);
#ifdef CONFIG_PM_SLEEP
static int w5100_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev);
struct w5100_priv *priv = netdev_priv(ndev);
if (netif_running(ndev)) {
@@ -766,8 +1258,7 @@ static int w5100_suspend(struct device *dev)
static int w5100_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct net_device *ndev = platform_get_drvdata(pdev);
+ struct net_device *ndev = dev_get_drvdata(dev);
struct w5100_priv *priv = netdev_priv(ndev);
if (netif_running(ndev)) {
@@ -783,15 +1274,15 @@ static int w5100_resume(struct device *dev)
}
#endif /* CONFIG_PM_SLEEP */
-static SIMPLE_DEV_PM_OPS(w5100_pm_ops, w5100_suspend, w5100_resume);
+SIMPLE_DEV_PM_OPS(w5100_pm_ops, w5100_suspend, w5100_resume);
+EXPORT_SYMBOL_GPL(w5100_pm_ops);
-static struct platform_driver w5100_driver = {
+static struct platform_driver w5100_mmio_driver = {
.driver = {
.name = DRV_NAME,
.pm = &w5100_pm_ops,
},
- .probe = w5100_probe,
- .remove = w5100_remove,
+ .probe = w5100_mmio_probe,
+ .remove = w5100_mmio_remove,
};
-
-module_platform_driver(w5100_driver);
+module_platform_driver(w5100_mmio_driver);
diff --git a/drivers/net/ethernet/wiznet/w5100.h b/drivers/net/ethernet/wiznet/w5100.h
new file mode 100644
index 000000000000..17983a3b8d6c
--- /dev/null
+++ b/drivers/net/ethernet/wiznet/w5100.h
@@ -0,0 +1,37 @@
+/*
+ * Ethernet driver for the WIZnet W5100 chip.
+ *
+ * Copyright (C) 2006-2008 WIZnet Co.,Ltd.
+ * Copyright (C) 2012 Mike Sinkovsky <msink@permonline.ru>
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+enum {
+ W5100,
+ W5200,
+ W5500,
+};
+
+struct w5100_ops {
+ bool may_sleep;
+ int chip_id;
+ int (*read)(struct net_device *ndev, u32 addr);
+ int (*write)(struct net_device *ndev, u32 addr, u8 data);
+ int (*read16)(struct net_device *ndev, u32 addr);
+ int (*write16)(struct net_device *ndev, u32 addr, u16 data);
+ int (*readbulk)(struct net_device *ndev, u32 addr, u8 *buf, int len);
+ int (*writebulk)(struct net_device *ndev, u32 addr, const u8 *buf,
+ int len);
+ int (*reset)(struct net_device *ndev);
+ int (*init)(struct net_device *ndev);
+};
+
+void *w5100_ops_priv(const struct net_device *ndev);
+
+int w5100_probe(struct device *dev, const struct w5100_ops *ops,
+ int sizeof_ops_priv, const void *mac_addr, int irq,
+ int link_gpio);
+int w5100_remove(struct device *dev);
+
+extern const struct dev_pm_ops w5100_pm_ops;
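
A third bus front-end only has to fill in a struct w5100_ops and hand it
to w5100_probe() along with the size of any bus-private state; a
hypothetical skeleton, with all names invented for illustration:

	static const struct w5100_ops foo_bus_ops = {
		.may_sleep = true,		/* accessors may block */
		.chip_id   = W5100,
		.read      = foo_bus_read,	/* hypothetical accessors */
		.write     = foo_bus_write,
		.read16    = foo_bus_read16,
		.write16   = foo_bus_write16,
		.readbulk  = foo_bus_readbulk,
		.writebulk = foo_bus_writebulk,
	};

	static int foo_bus_probe(struct foo_device *fdev)
	{
		return w5100_probe(&fdev->dev, &foo_bus_ops,
				   sizeof(struct foo_bus_priv),
				   NULL /* random MAC */, fdev->irq,
				   -EINVAL /* no link GPIO */);
	}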
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 8da7b930ff59..0b37ce9f28f1 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -362,7 +362,7 @@ static void w5300_tx_timeout(struct net_device *ndev)
w5300_hw_reset(priv);
w5300_hw_start(priv);
ndev->stats.tx_errors++;
- ndev->trans_start = jiffies;
+ netif_trans_update(ndev);
netif_wake_queue(ndev);
}
diff --git a/drivers/net/ethernet/xilinx/ll_temac.h b/drivers/net/ethernet/xilinx/ll_temac.h
index 902457e43628..7d06e3e1abac 100644
--- a/drivers/net/ethernet/xilinx/ll_temac.h
+++ b/drivers/net/ethernet/xilinx/ll_temac.h
@@ -332,7 +332,6 @@ struct temac_local {
struct device *dev;
/* Connection to PHY device */
- struct phy_device *phy_dev; /* Pointer to PHY device */
struct device_node *phy_node;
/* MDIO bus data */
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 5a1068df7038..a9bd665fd122 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -584,13 +584,13 @@ static void temac_device_reset(struct net_device *ndev)
dev_err(&ndev->dev, "Error setting TEMAC options\n");
/* Init Driver variable */
- ndev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(ndev); /* prevent tx timeout */
}
static void temac_adjust_link(struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
- struct phy_device *phy = lp->phy_dev;
+ struct phy_device *phy = ndev->phydev;
u32 mii_speed;
int link_state;
@@ -843,19 +843,20 @@ static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev)
static int temac_open(struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
+ struct phy_device *phydev = NULL;
int rc;
dev_dbg(&ndev->dev, "temac_open()\n");
if (lp->phy_node) {
- lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
- temac_adjust_link, 0, 0);
- if (!lp->phy_dev) {
+ phydev = of_phy_connect(lp->ndev, lp->phy_node,
+ temac_adjust_link, 0, 0);
+ if (!phydev) {
dev_err(lp->dev, "of_phy_connect() failed\n");
return -ENODEV;
}
- phy_start(lp->phy_dev);
+ phy_start(phydev);
}
temac_device_reset(ndev);
@@ -872,9 +873,8 @@ static int temac_open(struct net_device *ndev)
err_rx_irq:
free_irq(lp->tx_irq, ndev);
err_tx_irq:
- if (lp->phy_dev)
- phy_disconnect(lp->phy_dev);
- lp->phy_dev = NULL;
+ if (phydev)
+ phy_disconnect(phydev);
dev_err(lp->dev, "request_irq() failed\n");
return rc;
}
@@ -882,15 +882,15 @@ static int temac_open(struct net_device *ndev)
static int temac_stop(struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
+ struct phy_device *phydev = ndev->phydev;
dev_dbg(&ndev->dev, "temac_close()\n");
free_irq(lp->tx_irq, ndev);
free_irq(lp->rx_irq, ndev);
- if (lp->phy_dev)
- phy_disconnect(lp->phy_dev);
- lp->phy_dev = NULL;
+ if (phydev)
+ phy_disconnect(phydev);
temac_dma_bd_release(ndev);
@@ -916,15 +916,13 @@ temac_poll_controller(struct net_device *ndev)
static int temac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
{
- struct temac_local *lp = netdev_priv(ndev);
-
if (!netif_running(ndev))
return -EINVAL;
- if (!lp->phy_dev)
+ if (!ndev->phydev)
return -EINVAL;
- return phy_mii_ioctl(lp->phy_dev, rq, cmd);
+ return phy_mii_ioctl(ndev->phydev, rq, cmd);
}
static const struct net_device_ops temac_netdev_ops = {
@@ -969,30 +967,17 @@ static const struct attribute_group temac_attr_group = {
};
/* ethtool support */
-static int temac_get_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
-{
- struct temac_local *lp = netdev_priv(ndev);
- return phy_ethtool_gset(lp->phy_dev, cmd);
-}
-
-static int temac_set_settings(struct net_device *ndev, struct ethtool_cmd *cmd)
-{
- struct temac_local *lp = netdev_priv(ndev);
- return phy_ethtool_sset(lp->phy_dev, cmd);
-}
-
static int temac_nway_reset(struct net_device *ndev)
{
- struct temac_local *lp = netdev_priv(ndev);
- return phy_start_aneg(lp->phy_dev);
+ return phy_start_aneg(ndev->phydev);
}
static const struct ethtool_ops temac_ethtool_ops = {
- .get_settings = temac_get_settings,
- .set_settings = temac_set_settings,
.nway_reset = temac_nway_reset,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static int temac_of_probe(struct platform_device *op)
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 9ead4e269409..af27f7d1cbf3 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -382,7 +382,6 @@ struct axidma_bd {
* struct axienet_local - axienet private per device data
* @ndev: Pointer for net_device to which it will be attached.
* @dev: Pointer to device structure
- * @phy_dev: Pointer to PHY device structure attached to the axienet_local
* @phy_node: Pointer to device node structure
* @mii_bus: Pointer to MII bus structure
* @regs: Base address for the axienet_local device address space
@@ -420,7 +419,6 @@ struct axienet_local {
struct device *dev;
/* Connection to PHY device */
- struct phy_device *phy_dev; /* Pointer to PHY device */
struct device_node *phy_node;
/* MDIO bus data */
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 4684644703cc..36ee7ab300ae 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -508,7 +508,7 @@ static void axienet_device_reset(struct net_device *ndev)
axienet_set_multicast_list(ndev);
axienet_setoptions(ndev, lp->options);
- ndev->trans_start = jiffies;
+ netif_trans_update(ndev);
}
/**
@@ -525,7 +525,7 @@ static void axienet_adjust_link(struct net_device *ndev)
u32 link_state;
u32 setspeed = 1;
struct axienet_local *lp = netdev_priv(ndev);
- struct phy_device *phy = lp->phy_dev;
+ struct phy_device *phy = ndev->phydev;
link_state = phy->speed | (phy->duplex << 1) | phy->link;
if (lp->last_link != link_state) {
@@ -911,6 +911,7 @@ static int axienet_open(struct net_device *ndev)
{
int ret, mdio_mcreg;
struct axienet_local *lp = netdev_priv(ndev);
+ struct phy_device *phydev = NULL;
dev_dbg(&ndev->dev, "axienet_open()\n");
@@ -934,19 +935,19 @@ static int axienet_open(struct net_device *ndev)
if (lp->phy_node) {
if (lp->phy_type == XAE_PHY_TYPE_GMII) {
- lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
- axienet_adjust_link, 0,
- PHY_INTERFACE_MODE_GMII);
+ phydev = of_phy_connect(lp->ndev, lp->phy_node,
+ axienet_adjust_link, 0,
+ PHY_INTERFACE_MODE_GMII);
} else if (lp->phy_type == XAE_PHY_TYPE_RGMII_2_0) {
- lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
- axienet_adjust_link, 0,
- PHY_INTERFACE_MODE_RGMII_ID);
+ phydev = of_phy_connect(lp->ndev, lp->phy_node,
+ axienet_adjust_link, 0,
+ PHY_INTERFACE_MODE_RGMII_ID);
}
- if (!lp->phy_dev)
+ if (!phydev)
dev_err(lp->dev, "of_phy_connect() failed\n");
else
- phy_start(lp->phy_dev);
+ phy_start(phydev);
}
/* Enable tasklets for Axi DMA error handling */
@@ -967,9 +968,8 @@ static int axienet_open(struct net_device *ndev)
err_rx_irq:
free_irq(lp->tx_irq, ndev);
err_tx_irq:
- if (lp->phy_dev)
- phy_disconnect(lp->phy_dev);
- lp->phy_dev = NULL;
+ if (phydev)
+ phy_disconnect(phydev);
tasklet_kill(&lp->dma_err_tasklet);
dev_err(lp->dev, "request_irq() failed\n");
return ret;
@@ -1006,9 +1006,8 @@ static int axienet_stop(struct net_device *ndev)
free_irq(lp->tx_irq, ndev);
free_irq(lp->rx_irq, ndev);
- if (lp->phy_dev)
- phy_disconnect(lp->phy_dev);
- lp->phy_dev = NULL;
+ if (ndev->phydev)
+ phy_disconnect(ndev->phydev);
axienet_dma_bd_release(ndev);
return 0;
@@ -1078,51 +1077,6 @@ static const struct net_device_ops axienet_netdev_ops = {
};
/**
- * axienet_ethtools_get_settings - Get Axi Ethernet settings related to PHY.
- * @ndev: Pointer to net_device structure
- * @ecmd: Pointer to ethtool_cmd structure
- *
- * This implements ethtool command for getting PHY settings. If PHY could
- * not be found, the function returns -ENODEV. This function calls the
- * relevant PHY ethtool API to get the PHY settings.
- * Issue "ethtool ethX" under linux prompt to execute this function.
- *
- * Return: 0 on success, -ENODEV if PHY doesn't exist
- */
-static int axienet_ethtools_get_settings(struct net_device *ndev,
- struct ethtool_cmd *ecmd)
-{
- struct axienet_local *lp = netdev_priv(ndev);
- struct phy_device *phydev = lp->phy_dev;
- if (!phydev)
- return -ENODEV;
- return phy_ethtool_gset(phydev, ecmd);
-}
-
-/**
- * axienet_ethtools_set_settings - Set PHY settings as passed in the argument.
- * @ndev: Pointer to net_device structure
- * @ecmd: Pointer to ethtool_cmd structure
- *
- * This implements ethtool command for setting various PHY settings. If PHY
- * could not be found, the function returns -ENODEV. This function calls the
- * relevant PHY ethtool API to set the PHY.
- * Issue e.g. "ethtool -s ethX speed 1000" under linux prompt to execute this
- * function.
- *
- * Return: 0 on success, -ENODEV if PHY doesn't exist
- */
-static int axienet_ethtools_set_settings(struct net_device *ndev,
- struct ethtool_cmd *ecmd)
-{
- struct axienet_local *lp = netdev_priv(ndev);
- struct phy_device *phydev = lp->phy_dev;
- if (!phydev)
- return -ENODEV;
- return phy_ethtool_sset(phydev, ecmd);
-}
-
-/**
* axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
* @ndev: Pointer to net_device structure
* @ed: Pointer to ethtool_drvinfo structure
@@ -1344,8 +1298,6 @@ static int axienet_ethtools_set_coalesce(struct net_device *ndev,
}
static struct ethtool_ops axienet_ethtool_ops = {
- .get_settings = axienet_ethtools_get_settings,
- .set_settings = axienet_ethtools_set_settings,
.get_drvinfo = axienet_ethtools_get_drvinfo,
.get_regs_len = axienet_ethtools_get_regs_len,
.get_regs = axienet_ethtools_get_regs,
@@ -1354,6 +1306,8 @@ static struct ethtool_ops axienet_ethtool_ops = {
.set_pauseparam = axienet_ethtools_set_pauseparam,
.get_coalesce = axienet_ethtools_get_coalesce,
.set_coalesce = axienet_ethtools_set_coalesce,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
/**
@@ -1587,9 +1541,9 @@ static int axienet_probe(struct platform_device *pdev)
/* Find the DMA node, map the DMA registers, and decode the DMA IRQs */
np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0);
- if (IS_ERR(np)) {
+ if (!np) {
dev_err(&pdev->dev, "could not find DMA node\n");
- ret = PTR_ERR(np);
+ ret = -ENODEV;
goto free_netdev;
}
ret = of_address_to_resource(np, 0, &dmares);
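The last axienet hunk fixes a real bug rather than a style issue: of_parse_phandle() returns NULL on failure, never an ERR_PTR(), so the old IS_ERR(np) test could not trigger and PTR_ERR(np) would have produced a bogus error code. A minimal sketch of the corrected lookup (demo_find_dma_node is illustrative; the property string reuses the patch's "axistream-connected" only as an example):

	#include <linux/of.h>
	#include <linux/device.h>

	static int demo_find_dma_node(struct device *dev)
	{
		struct device_node *np;

		np = of_parse_phandle(dev->of_node, "axistream-connected", 0);
		if (!np) {		/* NULL, not ERR_PTR(), on failure */
			dev_err(dev, "could not find DMA node\n");
			return -ENODEV;
		}
		/* ... map registers, decode IRQs ... */
		of_node_put(np);	/* balance the ref of_parse_phandle() took */
		return 0;
	}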
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index e324b3092380..93dc10b10c09 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -531,7 +531,7 @@ static void xemaclite_tx_timeout(struct net_device *dev)
}
/* To exclude tx timeout */
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
/* We're all ready to go. Start the queue */
netif_wake_queue(dev);
@@ -563,7 +563,7 @@ static void xemaclite_tx_handler(struct net_device *dev)
dev->stats.tx_bytes += lp->deferred_skb->len;
dev_kfree_skb_irq(lp->deferred_skb);
lp->deferred_skb = NULL;
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
}
@@ -1131,11 +1131,13 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong");
mac_address = of_get_mac_address(ofdev->dev.of_node);
- if (mac_address)
+ if (mac_address) {
/* Set the MAC address. */
memcpy(ndev->dev_addr, mac_address, ETH_ALEN);
- else
- dev_warn(dev, "No MAC address found\n");
+ } else {
+ dev_warn(dev, "No MAC address found, using random\n");
+ eth_hw_addr_random(ndev);
+ }
/* Clear the Tx CSR's in case this is a restart */
__raw_writel(0, lp->base_addr + XEL_TSR_OFFSET);
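The emaclite probe change upgrades a warning into a usable fallback: when the device tree carries no MAC address, eth_hw_addr_random() assigns a random locally-administered address (recording NET_ADDR_RANDOM in addr_assign_type) instead of leaving the interface with an all-zero MAC. A sketch of the pattern, assuming the of_get_mac_address() of this kernel generation, which returns a pointer or NULL:

	#include <linux/etherdevice.h>
	#include <linux/of_net.h>

	static void demo_set_mac(struct net_device *ndev, struct device_node *np)
	{
		const void *addr = of_get_mac_address(np);

		if (addr)
			memcpy(ndev->dev_addr, addr, ETH_ALEN);	/* DT-provided */
		else
			eth_hw_addr_random(ndev);	/* random LAA fallback */
	}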
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index d56f8693202b..ddced28e8247 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -1144,8 +1144,8 @@ xirc2ps_interrupt(int irq, void *dev_id)
dev->stats.tx_packets += lp->last_ptr_value - n;
netif_wake_queue(dev);
}
- if (tx_status & 0x0002) { /* Execessive collissions */
- pr_debug("tx restarted due to execssive collissions\n");
+ if (tx_status & 0x0002) { /* Excessive collisions */
+ pr_debug("tx restarted due to excessive collisions\n");
PutByte(XIRCREG_CR, RestartTx); /* restart transmitter process */
}
if (tx_status & 0x0040)
@@ -1199,7 +1199,7 @@ xirc2ps_tx_timeout_task(struct work_struct *work)
struct net_device *dev = local->dev;
/* reset the card */
do_reset(dev,1);
- dev->trans_start = jiffies; /* prevent tx timeout */
+ netif_trans_update(dev); /* prevent tx timeout */
netif_wake_queue(dev);
}
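All of the trans_start conversions in this series follow the same rule: net_device::trans_start is being retired, and netif_trans_update() instead refreshes trans_start on TX queue 0, which is what the netdev watchdog consults. A minimal sketch of the timeout-recovery pattern (demo_restart_tx is illustrative):

	#include <linux/netdevice.h>

	static void demo_restart_tx(struct net_device *dev)
	{
		/* ... driver-specific hardware reset ... */
		netif_trans_update(dev);	/* updates txq 0's trans_start */
		netif_wake_queue(dev);		/* let the stack queue packets again */
	}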
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 5138407941cf..7f127dc1b7ba 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -171,7 +171,6 @@ struct port {
struct npe *npe;
struct net_device *netdev;
struct napi_struct napi;
- struct phy_device *phydev;
struct eth_plat_info *plat;
buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
struct desc *desc_tab; /* coherent */
@@ -562,7 +561,7 @@ static void ixp4xx_mdio_remove(void)
static void ixp4xx_adjust_link(struct net_device *dev)
{
struct port *port = netdev_priv(dev);
- struct phy_device *phydev = port->phydev;
+ struct phy_device *phydev = dev->phydev;
if (!phydev->link) {
if (port->speed) {
@@ -976,8 +975,6 @@ static void eth_set_mcast_list(struct net_device *dev)
static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
- struct port *port = netdev_priv(dev);
-
if (!netif_running(dev))
return -EINVAL;
@@ -988,7 +985,7 @@ static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
return hwtstamp_get(dev, req);
}
- return phy_mii_ioctl(port->phydev, req, cmd);
+ return phy_mii_ioctl(dev->phydev, req, cmd);
}
/* ethtool support */
@@ -1005,22 +1002,9 @@ static void ixp4xx_get_drvinfo(struct net_device *dev,
strlcpy(info->bus_info, "internal", sizeof(info->bus_info));
}
-static int ixp4xx_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct port *port = netdev_priv(dev);
- return phy_ethtool_gset(port->phydev, cmd);
-}
-
-static int ixp4xx_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct port *port = netdev_priv(dev);
- return phy_ethtool_sset(port->phydev, cmd);
-}
-
static int ixp4xx_nway_reset(struct net_device *dev)
{
- struct port *port = netdev_priv(dev);
- return phy_start_aneg(port->phydev);
+ return phy_start_aneg(dev->phydev);
}
int ixp46x_phc_index = -1;
@@ -1054,11 +1038,11 @@ static int ixp4xx_get_ts_info(struct net_device *dev,
static const struct ethtool_ops ixp4xx_ethtool_ops = {
.get_drvinfo = ixp4xx_get_drvinfo,
- .get_settings = ixp4xx_get_settings,
- .set_settings = ixp4xx_set_settings,
.nway_reset = ixp4xx_nway_reset,
.get_link = ethtool_op_get_link,
.get_ts_info = ixp4xx_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
};
@@ -1259,7 +1243,7 @@ static int eth_open(struct net_device *dev)
}
port->speed = 0; /* force "link up" message */
- phy_start(port->phydev);
+ phy_start(dev->phydev);
for (i = 0; i < ETH_ALEN; i++)
__raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]);
@@ -1380,7 +1364,7 @@ static int eth_close(struct net_device *dev)
printk(KERN_CRIT "%s: unable to disable loopback\n",
dev->name);
- phy_stop(port->phydev);
+ phy_stop(dev->phydev);
if (!ports_open)
qmgr_disable_irq(TXDONE_QUEUE);
@@ -1405,6 +1389,7 @@ static int eth_init_one(struct platform_device *pdev)
struct port *port;
struct net_device *dev;
struct eth_plat_info *plat = dev_get_platdata(&pdev->dev);
+ struct phy_device *phydev = NULL;
u32 regs_phys;
char phy_id[MII_BUS_ID_SIZE + 3];
int err;
@@ -1466,14 +1451,14 @@ static int eth_init_one(struct platform_device *pdev)
snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
mdio_bus->id, plat->phy);
- port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link,
- PHY_INTERFACE_MODE_MII);
- if (IS_ERR(port->phydev)) {
- err = PTR_ERR(port->phydev);
+ phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link,
+ PHY_INTERFACE_MODE_MII);
+ if (IS_ERR(phydev)) {
+ err = PTR_ERR(phydev);
goto err_free_mem;
}
- port->phydev->irq = PHY_POLL;
+ phydev->irq = PHY_POLL;
if ((err = register_netdev(dev)))
goto err_phy_dis;
@@ -1484,7 +1469,7 @@ static int eth_init_one(struct platform_device *pdev)
return 0;
err_phy_dis:
- phy_disconnect(port->phydev);
+ phy_disconnect(phydev);
err_free_mem:
npe_port_tab[NPE_ID(port->id)] = NULL;
release_resource(port->mem_res);
@@ -1498,10 +1483,11 @@ err_free:
static int eth_remove_one(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
+ struct phy_device *phydev = dev->phydev;
struct port *port = netdev_priv(dev);
unregister_netdev(dev);
- phy_disconnect(port->phydev);
+ phy_disconnect(phydev);
npe_port_tab[NPE_ID(port->id)] = NULL;
npe_release(port->npe);
release_resource(port->mem_res);
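Unlike of_parse_phandle(), phy_connect() does report failure through ERR_PTR(), so the IS_ERR()/PTR_ERR() pair in eth_init_one() is correct and survives the conversion to a local phydev variable. A sketch of the attach pattern, with demo_attach_phy standing in for the driver's probe logic:

	#include <linux/phy.h>

	static int demo_attach_phy(struct net_device *dev, const char *phy_id,
				   void (*adjust)(struct net_device *))
	{
		struct phy_device *phydev;

		phydev = phy_connect(dev, phy_id, adjust, PHY_INTERFACE_MODE_MII);
		if (IS_ERR(phydev))		/* ERR_PTR() on failure here */
			return PTR_ERR(phydev);
		phydev->irq = PHY_POLL;		/* no PHY IRQ line wired up */
		return 0;	/* phylib has stored phydev in dev->phydev */
	}

After phy_connect() succeeds, the pointer is also reachable as dev->phydev, which is why eth_remove_one() can disconnect without keeping a private copy.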